diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..b12854db9eb7545e2bea7fbc817c4065bcac4448
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,42 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+google/umt5-xxl/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+xlm-roberta-large/onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
+xlm-roberta-large/onnx/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+assets/comp_effic.png filter=lfs diff=lfs merge=lfs -text
+assets/moe_2.png filter=lfs diff=lfs merge=lfs -text
+assets/performance.png filter=lfs diff=lfs merge=lfs -text
+assets/vae.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.msc b/.msc
new file mode 100644
index 0000000000000000000000000000000000000000..a4f92bf7ea50423e4d35497ed7491db200efbdee
Binary files /dev/null and b/.msc differ
diff --git a/.mv b/.mv
new file mode 100644
index 0000000000000000000000000000000000000000..cb8e4f268188250296d4cba31d447fb9cb4768dc
--- /dev/null
+++ b/.mv
@@ -0,0 +1 @@
+Revision:master,CreatedAt:1757594242
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7bced2da35ab0f8a452bf5097ff93859a49af56
--- /dev/null
+++ b/README.md
@@ -0,0 +1,278 @@
+---
+license: apache-2.0
+---
+# Wan2.2
+
+
+
+
+
+
+ 💜 Wan    |    🖥️ GitHub    |   🤗 Hugging Face    |   🤖 ModelScope    |    📑 Paper    |    📑 Blog    |    💬 Discord   
+
+ 📕 使用指南(中文)    |    📘 User Guide(English)    |   💬 WeChat(微信)   
+
+
+-----
+
+[**Wan: Open and Advanced Large-Scale Video Generative Models**](https://arxiv.org/abs/2503.20314)
+
+
+We are excited to introduce **Wan2.2**, a major upgrade to our foundational video models. With **Wan2.2**, we have focused on incorporating the following innovations:
+
+- 👍 **Effective MoE Architecture**: Wan2.2 introduces a Mixture-of-Experts (MoE) architecture into video diffusion models. By separating the denoising process across timesteps with specialized powerful expert models, this enlarges the overall model capacity while maintaining the same computational cost.
+
+- 👍 **Cinematic-level Aesthetics**: Wan2.2 incorporates meticulously curated aesthetic data, complete with detailed labels for lighting, composition, contrast, color tone, and more. This allows for more precise and controllable cinematic style generation, facilitating the creation of videos with customizable aesthetic preferences.
+
+- 👍 **Complex Motion Generation**: Compared to Wan2.1, Wan2.2 is trained on a significantly larger dataset, with +65.6% more images and +83.2% more videos. This expansion notably enhances the model's generalization across multiple dimensions such as motions, semantics, and aesthetics, achieving TOP performance among all open-sourced and closed-sourced models.
+
+- 👍 **Efficient High-Definition Hybrid TI2V**: Wan2.2 open-sources a 5B model built with our advanced Wan2.2-VAE that achieves a compression ratio of **16×16×4**. This model supports both text-to-video and image-to-video generation at 720P resolution with 24fps and can also run on consumer-grade graphics cards like 4090. It is one of the fastest **720P@24fps** models currently available, capable of serving both the industrial and academic sectors simultaneously.
+
+
+## Video Demos
+
+
+
+
+ Your browser does not support the video tag.
+
+
+
+## 🔥 Latest News!!
+
+* Sep 19, 2025: 💃 We introduce **[Wan2.2-Animate-14B](https://humanaigc.github.io/wan-animate)**, a unified model for character animation and replacement with holistic movement and expression replication. We released the [model weights](#model-download) and [inference code](#run-with-wan-animate). And now you can try it on [wan.video](https://wan.video/), [ModelScope Studio](https://www.modelscope.cn/studios/Wan-AI/Wan2.2-Animate) or [HuggingFace Space](https://huggingface.co/spaces/Wan-AI/Wan2.2-Animate)!
+* Aug 26, 2025: 🎵 We introduce **[Wan2.2-S2V-14B](https://humanaigc.github.io/wan-s2v-webpage)**, an audio-driven cinematic video generation model, including [inference code](#run-speech-to-video-generation), [model weights](#model-download), and [technical report](https://humanaigc.github.io/wan-s2v-webpage/content/wan-s2v.pdf)! Now you can try it on [wan.video](https://wan.video/), [ModelScope Gradio](https://www.modelscope.cn/studios/Wan-AI/Wan2.2-S2V) or [HuggingFace Gradio](https://huggingface.co/spaces/Wan-AI/Wan2.2-S2V)!
+* Jul 28, 2025: 👋 We have opened a [HF space](https://huggingface.co/spaces/Wan-AI/Wan-2.2-5B) using the TI2V-5B model. Enjoy!
+* Jul 28, 2025: 👋 Wan2.2 has been integrated into ComfyUI ([CN](https://docs.comfy.org/zh-CN/tutorials/video/wan/wan2_2) | [EN](https://docs.comfy.org/tutorials/video/wan/wan2_2)). Enjoy!
+* Jul 28, 2025: 👋 Wan2.2's T2V, I2V and TI2V have been integrated into Diffusers ([T2V-A14B](https://huggingface.co/Wan-AI/Wan2.2-T2V-A14B-Diffusers) | [I2V-A14B](https://huggingface.co/Wan-AI/Wan2.2-I2V-A14B-Diffusers) | [TI2V-5B](https://huggingface.co/Wan-AI/Wan2.2-TI2V-5B-Diffusers)). Feel free to give it a try!
+* Jul 28, 2025: 👋 We've released the inference code and model weights of **Wan2.2**.
+* Sep 5, 2025: 👋 We add text-to-speech synthesis support with [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) for Speech-to-Video generation task.
+
+
+## Community Works
+If your research or project builds upon [**Wan2.1**](https://github.com/Wan-Video/Wan2.1) or [**Wan2.2**](https://github.com/Wan-Video/Wan2.2), and you would like more people to see it, please inform us.
+
+- [DiffSynth-Studio](https://github.com/modelscope/DiffSynth-Studio) provides comprehensive support for Wan 2.2, including low-GPU-memory layer-by-layer offload, FP8 quantization, sequence parallelism, LoRA training, full training.
+- [Kijai's ComfyUI WanVideoWrapper](https://github.com/kijai/ComfyUI-WanVideoWrapper) is an alternative implementation of Wan models for ComfyUI. Thanks to its Wan-only focus, it's on the frontline of getting cutting edge optimizations and hot research features, which are often hard to integrate into ComfyUI quickly due to its more rigid structure.
+- [Cache-dit](https://github.com/vipshop/cache-dit) offers Fully Cache Acceleration support for Wan2.2 MoE with DBCache, TaylorSeer and Cache CFG. Visit their [example](https://github.com/vipshop/cache-dit/blob/main/examples/pipeline/run_wan_2.2.py) for more details.
+- [FastVideo](https://github.com/hao-ai-lab/FastVideo) includes distilled Wan models with sparse attention that significantly speed up the inference time.
+
+## 📑 Todo List
+- Wan2.2 Text-to-Video
+ - [x] Multi-GPU Inference code of the A14B and 14B models
+ - [x] Checkpoints of the A14B and 14B models
+ - [x] ComfyUI integration
+ - [x] Diffusers integration
+- Wan2.2 Image-to-Video
+ - [x] Multi-GPU Inference code of the A14B model
+ - [x] Checkpoints of the A14B model
+ - [x] ComfyUI integration
+ - [x] Diffusers integration
+- Wan2.2 Text-Image-to-Video
+ - [x] Multi-GPU Inference code of the 5B model
+ - [x] Checkpoints of the 5B model
+ - [x] ComfyUI integration
+ - [x] Diffusers integration
+- Wan2.2-S2V Speech-to-Video
+ - [x] Inference code of Wan2.2-S2V
+ - [x] Checkpoints of Wan2.2-S2V-14B
+ - [x] ComfyUI integration
+ - [x] Diffusers integration
+- Wan2.2-Animate Character Animation and Replacement
+ - [x] Inference code of Wan2.2-Animate
+ - [x] Checkpoints of Wan2.2-Animate
+ - [x] ComfyUI integration
+ - [ ] Diffusers integration
+
+## Run Wan2.2 Animate
+
+#### Installation
+Clone the repo:
+```sh
+git clone https://github.com/Wan-Video/Wan2.2.git
+cd Wan2.2
+```
+
+Install dependencies:
+```sh
+# Ensure torch >= 2.4.0
+# If the installation of `flash_attn` fails, try installing the other packages first and install `flash_attn` last
+pip install -r requirements.txt
+# If you want to use CosyVoice to synthesize speech for Speech-to-Video Generation, please install requirements_s2v.txt additionally
+pip install -r requirements_s2v.txt
+```
+
+
+#### Model Download
+
+| Models | Download Links | Description |
+|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------|-------------|
+| T2V-A14B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.2-T2V-A14B) 🤖 [ModelScope](https://modelscope.cn/models/Wan-AI/Wan2.2-T2V-A14B) | Text-to-Video MoE model, supports 480P & 720P |
+| I2V-A14B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.2-I2V-A14B) 🤖 [ModelScope](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B) | Image-to-Video MoE model, supports 480P & 720P |
+| TI2V-5B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.2-TI2V-5B) 🤖 [ModelScope](https://modelscope.cn/models/Wan-AI/Wan2.2-TI2V-5B) | High-compression VAE, T2V+I2V, supports 720P |
+| S2V-14B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.2-S2V-14B) 🤖 [ModelScope](https://modelscope.cn/models/Wan-AI/Wan2.2-S2V-14B) | Speech-to-Video model, supports 480P & 720P |
+| Animate-14B | 🤗 [Huggingface](https://huggingface.co/Wan-AI/Wan2.2-Animate-14B) 🤖 [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.2-Animate-14B) | Character animation and replacement | |
+
+
+Download models using huggingface-cli:
+``` sh
+pip install "huggingface_hub[cli]"
+huggingface-cli download Wan-AI/Wan2.2-Animate-14B --local-dir ./Wan2.2-Animate-14B
+```
+
+Download models using modelscope-cli:
+``` sh
+pip install modelscope
+modelscope download Wan-AI/Wan2.2-Animate-14B --local_dir ./Wan2.2-Animate-14B
+```
+
+#### Run Wan-Animate-14B
+
+Wan-Animate takes a video and a character image as input, and generates a video in either "animation" or "replacement" mode.
+
+1. animation mode: The model generates a video of the character image that mimics the human motion in the input video.
+2. replacement mode: The model replaces the character in the input video with the character from the reference image, preserving the original video's motion and scene.
+
+Please visit our [project page](https://humanaigc.github.io/wan-animate) to see more examples and learn about the scenarios suitable for this model.
+
+##### (1) Preprocessing
+The input video should be preprocessed into several materials before being fed into the inference process. Please refer to the following processing flow, and more details about preprocessing can be found in [UserGuider](https://github.com/Wan-Video/Wan2.2/blob/main/wan/modules/animate/preprocess/UserGuider.md).
+
+* For animation
+```bash
+python ./wan/modules/animate/preprocess/preprocess_data.py \
+ --ckpt_path ./Wan2.2-Animate-14B/process_checkpoint \
+ --video_path ./examples/wan_animate/animate/video.mp4 \
+ --refer_path ./examples/wan_animate/animate/image.jpeg \
+ --save_path ./examples/wan_animate/animate/process_results \
+ --resolution_area 1280 720 \
+ --retarget_flag \
+ --use_flux
+```
+* For replacement
+```bash
+python ./wan/modules/animate/preprocess/preprocess_data.py \
+ --ckpt_path ./Wan2.2-Animate-14B/process_checkpoint \
+ --video_path ./examples/wan_animate/replace/video.mp4 \
+ --refer_path ./examples/wan_animate/replace/image.jpeg \
+ --save_path ./examples/wan_animate/replace/process_results \
+ --resolution_area 1280 720 \
+ --iterations 3 \
+ --k 7 \
+ --w_len 1 \
+ --h_len 1 \
+ --replace_flag
+```
+##### (2) Run in animation mode
+
+* Single-GPU inference
+
+```bash
+python generate.py --task animate-14B --ckpt_dir ./Wan2.2-Animate-14B/ --src_root_path ./examples/wan_animate/animate/process_results/ --refert_num 1
+```
+
+* Multi-GPU inference using FSDP + DeepSpeed Ulysses
+
+```bash
+python -m torch.distributed.run --nnodes 1 --nproc_per_node 8 generate.py --task animate-14B --ckpt_dir ./Wan2.2-Animate-14B/ --src_root_path ./examples/wan_animate/animate/process_results/ --refert_num 1 --dit_fsdp --t5_fsdp --ulysses_size 8
+```
+
+##### (3) Run in replacement mode
+
+* Single-GPU inference
+
+```bash
+python generate.py --task animate-14B --ckpt_dir ./Wan2.2-Animate-14B/ --src_root_path ./examples/wan_animate/replace/process_results/ --refert_num 1 --replace_flag --use_relighting_lora
+```
+
+* Multi-GPU inference using FSDP + DeepSpeed Ulysses
+
+```bash
+python -m torch.distributed.run --nnodes 1 --nproc_per_node 8 generate.py --task animate-14B --ckpt_dir ./Wan2.2-Animate-14B/ --src_root_path ./examples/wan_animate/replace/process_results/src_pose.mp4 --refert_num 1 --replace_flag --use_relighting_lora --dit_fsdp --t5_fsdp --ulysses_size 8
+```
+
+> 💡 If you're using **Wan-Animate**, we do not recommend using LoRA models trained on `Wan2.2`, since weight changes during training may lead to unexpected behavior.
+
+## Computational Efficiency on Different GPUs
+
+We test the computational efficiency of different **Wan2.2** models on different GPUs in the following table. The results are presented in the format: **Total time (s) / peak GPU memory (GB)**.
+
+
+
+
+
+
+> The parameter settings for the tests presented in this table are as follows:
+> (1) Multi-GPU: 14B: `--ulysses_size 4/8 --dit_fsdp --t5_fsdp`, 5B: `--ulysses_size 4/8 --offload_model True --convert_model_dtype --t5_cpu`; Single-GPU: 14B: `--offload_model True --convert_model_dtype`, 5B: `--offload_model True --convert_model_dtype --t5_cpu`
+(--convert_model_dtype converts model parameter types to config.param_dtype);
+> (2) The distributed testing utilizes the built-in FSDP and Ulysses implementations, with FlashAttention3 deployed on Hopper architecture GPUs;
+> (3) Tests were run without the `--use_prompt_extend` flag;
+> (4) Reported results are the average of multiple samples taken after the warm-up phase.
+
+
+-------
+
+## Introduction of Wan2.2
+
+**Wan2.2** builds on the foundation of Wan2.1 with notable improvements in generation quality and model capability. This upgrade is driven by a series of key technical innovations, mainly including the Mixture-of-Experts (MoE) architecture, upgraded training data, and high-compression video generation.
+
+##### (1) Mixture-of-Experts (MoE) Architecture
+
+Wan2.2 introduces Mixture-of-Experts (MoE) architecture into the video generation diffusion model. MoE has been widely validated in large language models as an efficient approach to increase total model parameters while keeping inference cost nearly unchanged. In Wan2.2, the A14B model series adopts a two-expert design tailored to the denoising process of diffusion models: a high-noise expert for the early stages, focusing on overall layout; and a low-noise expert for the later stages, refining video details. Each expert model has about 14B parameters, resulting in a total of 27B parameters but only 14B active parameters per step, keeping inference computation and GPU memory nearly unchanged.
+
+
+
+
+
+The transition point between the two experts is determined by the signal-to-noise ratio (SNR), a metric that decreases monotonically as the denoising step $t$ increases. At the beginning of the denoising process, $t$ is large and the noise level is high, so the SNR is at its minimum, denoted as ${SNR}_{min}$. In this stage, the high-noise expert is activated. We define a threshold step ${t}_{moe}$ corresponding to half of the ${SNR}_{min}$, and switch to the low-noise expert when $t<{t}_{moe}$.
+
+
+
+
+
+To validate the effectiveness of the MoE architecture, four settings are compared based on their validation loss curves. The baseline **Wan2.1** model does not employ the MoE architecture. Among the MoE-based variants, the **Wan2.1 & High-Noise Expert** reuses the Wan2.1 model as the low-noise expert while using Wan2.2's high-noise expert, whereas the **Wan2.1 & Low-Noise Expert** uses Wan2.1 as the high-noise expert and employs Wan2.2's low-noise expert. The **Wan2.2 (MoE)** (our final version) achieves the lowest validation loss, indicating that its generated video distribution is closest to ground-truth and exhibits superior convergence.
+
+
+##### (2) Efficient High-Definition Hybrid TI2V
+To enable more efficient deployment, Wan2.2 also explores a high-compression design. In addition to the 27B MoE models, a 5B dense model, i.e., TI2V-5B, is released. It is supported by a high-compression Wan2.2-VAE, which achieves a $T\times H\times W$ compression ratio of $4\times16\times16$, increasing the overall compression rate to 64 while maintaining high-quality video reconstruction. With an additional patchification layer, the total compression ratio of TI2V-5B reaches $4\times32\times32$. Without specific optimization, TI2V-5B can generate a 5-second 720P video in under 9 minutes on a single consumer-grade GPU, ranking among the fastest 720P@24fps video generation models. This model also natively supports both text-to-video and image-to-video tasks within a single unified framework, covering both academic research and practical applications.
+
+
+
+
+
+
+
+
+##### Comparisons to SOTAs
+We compared Wan2.2 with leading closed-source commercial models on our new Wan-Bench 2.0, evaluating performance across multiple crucial dimensions. The results demonstrate that Wan2.2 achieves superior performance compared to these leading models.
+
+
+
+
+
+
+## Citation
+If you find our work helpful, please cite us.
+
+```
+@article{wan2025,
+ title={Wan: Open and Advanced Large-Scale Video Generative Models},
+ author={Team Wan and Ang Wang and Baole Ai and Bin Wen and Chaojie Mao and Chen-Wei Xie and Di Chen and Feiwu Yu and Haiming Zhao and Jianxiao Yang and Jianyuan Zeng and Jiayu Wang and Jingfeng Zhang and Jingren Zhou and Jinkai Wang and Jixuan Chen and Kai Zhu and Kang Zhao and Keyu Yan and Lianghua Huang and Mengyang Feng and Ningyi Zhang and Pandeng Li and Pingyu Wu and Ruihang Chu and Ruili Feng and Shiwei Zhang and Siyang Sun and Tao Fang and Tianxing Wang and Tianyi Gui and Tingyu Weng and Tong Shen and Wei Lin and Wei Wang and Wei Wang and Wenmeng Zhou and Wente Wang and Wenting Shen and Wenyuan Yu and Xianzhong Shi and Xiaoming Huang and Xin Xu and Yan Kou and Yangyu Lv and Yifei Li and Yijing Liu and Yiming Wang and Yingya Zhang and Yitong Huang and Yong Li and You Wu and Yu Liu and Yulin Pan and Yun Zheng and Yuntao Hong and Yupeng Shi and Yutong Feng and Zeyinzi Jiang and Zhen Han and Zhi-Fan Wu and Ziyu Liu},
+ journal = {arXiv preprint arXiv:2503.20314},
+ year={2025}
+}
+```
+
+## License Agreement
+The models in this repository are licensed under the Apache 2.0 License. We claim no rights over your generated contents, granting you the freedom to use them while ensuring that your usage complies with the provisions of this license. You are fully accountable for your use of the models, which must not involve sharing any content that violates applicable laws, causes harm to individuals or groups, disseminates personal information intended for harm, spreads misinformation, or targets vulnerable populations. For a complete list of restrictions and details regarding your rights, please refer to the full text of the [license](LICENSE.txt).
+
+
+## Acknowledgements
+
+We would like to thank the contributors to the [SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium), [Qwen](https://huggingface.co/Qwen), [umt5-xxl](https://huggingface.co/google/umt5-xxl), [diffusers](https://github.com/huggingface/diffusers) and [HuggingFace](https://huggingface.co) repositories, for their open research.
+
+
+
+## Contact Us
+If you would like to leave a message to our research or product teams, feel free to join our [Discord](https://discord.gg/AKNgpMK4Yj) or [WeChat groups](https://gw.alicdn.com/imgextra/i2/O1CN01tqjWFi1ByuyehkTSB_!!6000000000015-0-tps-611-1279.jpg)!
+
diff --git a/Wan2.1_VAE.pth b/Wan2.1_VAE.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fbe5a2c64e0430c3fe40a7ce8dccfead62106a17
--- /dev/null
+++ b/Wan2.1_VAE.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acf6c5aa49ad281d4b561e10656e2397c446a8ba4b8d8f19d3dd125c2628bc6a
+size 507609928
diff --git a/assets/comp_effic.png b/assets/comp_effic.png
new file mode 100644
index 0000000000000000000000000000000000000000..bdc26da30b575b80d78f5972a08811eca9a6c455
--- /dev/null
+++ b/assets/comp_effic.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75ee012dcfb08365bec67a3ec7afc126fc2817f79b9f80e38711792d4770e32b
+size 202156
diff --git a/assets/logo.png b/assets/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..0c55854cbd9692975f217714ffd83fd4b37f5dca
Binary files /dev/null and b/assets/logo.png differ
diff --git a/assets/moe_2.png b/assets/moe_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..c788c691cf96bbf9c0598cb440c23935848e8619
--- /dev/null
+++ b/assets/moe_2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ea471ccb64349bd08bc9a78f336ae000e9ca3b40da9a652b8028b214a8c6093
+size 527914
diff --git a/assets/moe_arch.png b/assets/moe_arch.png
new file mode 100644
index 0000000000000000000000000000000000000000..7822af1e65215ee2a9449c9b7616afd713f67a01
Binary files /dev/null and b/assets/moe_arch.png differ
diff --git a/assets/performance.png b/assets/performance.png
new file mode 100644
index 0000000000000000000000000000000000000000..ca558e3e33efc415814aeb3b103ae2e2b34ba233
--- /dev/null
+++ b/assets/performance.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97ef99c13c8ae717a8a11c8d8ec927b69077c647cc6689755d08fc38e7fbb830
+size 306535
diff --git a/assets/vae.png b/assets/vae.png
new file mode 100644
index 0000000000000000000000000000000000000000..7d290425cf3f7ab20e2d80bece0d780259b24303
--- /dev/null
+++ b/assets/vae.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aaea5e187f1c5908e15ade5bef24c9fb59882986bc3d2ad75f7fe820f3d772f
+size 165486
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1dac261c9669941c74e9c313545e225863d9bb15
--- /dev/null
+++ b/config.json
@@ -0,0 +1,30 @@
+{
+ "__name__": "Config: Transformer config for WanAnimateModel",
+ "_class_name": "WanAnimateModel",
+ "_diffusers_version": "0.33.1",
+ "_name_or_path": "Wan-animate",
+ "cross_attn_norm": true,
+ "dim": 5120,
+ "eps": 1e-06,
+ "ffn_dim": 13824,
+ "freq_dim": 256,
+ "in_dim": 36,
+ "motion_encoder_dim": 512,
+ "num_heads": 40,
+ "num_layers": 40,
+ "out_dim": 16,
+ "patch_size": [
+ 1,
+ 2,
+ 2
+ ],
+ "qk_norm": true,
+ "text_dim": 4096,
+ "text_len": 512,
+ "use_context_parallel": false,
+ "use_img_emb": true,
+ "window_size": [
+ -1,
+ -1
+ ]
+}
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000000000000000000000000000000000000..d64db5cb6ceecfb2c7f0929427dc5f48bc4b7f37
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework":"Pytorch","task":"any-to-any"}
\ No newline at end of file
diff --git a/diffusion_pytorch_model-00001-of-00004.safetensors b/diffusion_pytorch_model-00001-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bbe17b6223b3c43f6f03f677ad1457608e7de18f
--- /dev/null
+++ b/diffusion_pytorch_model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:575c2dba750c3b40240fb742a4224453aa97dfbd3c5f5a0086be431cdefdd69c
+size 9875257880
diff --git a/diffusion_pytorch_model-00002-of-00004.safetensors b/diffusion_pytorch_model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6d718b789681b47141a5db0bae49ccd8bf06e519
--- /dev/null
+++ b/diffusion_pytorch_model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b90b820627d43eeeb1ae0489182f9a8c870374fd72cc99dccb9eddfc2ace8325
+size 9975378288
diff --git a/diffusion_pytorch_model-00003-of-00004.safetensors b/diffusion_pytorch_model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ba85f1ab9e36b7134710fd103ef56cbef6395069
--- /dev/null
+++ b/diffusion_pytorch_model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2aa343b0ba04f563566e9959a439611a18189cd9accdc04d59681be9ce5be50
+size 9954400528
diff --git a/diffusion_pytorch_model-00004-of-00004.safetensors b/diffusion_pytorch_model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c72073822391009da760a6f712572dd151f7b85c
--- /dev/null
+++ b/diffusion_pytorch_model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5fea7f38aa4cb70ed59a9ecf406d0c05f3dfc85ee70bc83de35679c42642d1b
+size 4744748472
diff --git a/diffusion_pytorch_model.safetensors.index.json b/diffusion_pytorch_model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..a9742c9b1a50dc78a054ef2cc2d33f894156a9ea
--- /dev/null
+++ b/diffusion_pytorch_model.safetensors.index.json
@@ -0,0 +1,1448 @@
+{
+ "metadata": {
+ "total_size": 34549634216
+ },
+ "weight_map": {
+ "blocks.0.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.0.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.1.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.10.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.11.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.11.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.11.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.11.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.11.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.12.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.12.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.13.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.14.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.15.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.16.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.17.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.18.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.19.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.2.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.2.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.20.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.20.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.21.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.22.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.k_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.v_img.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.cross_attn.v_img.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.ffn.0.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.ffn.0.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.ffn.2.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.ffn.2.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.norm3.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.norm3.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.k.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.norm_k.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.norm_q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.o.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.o.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.q.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.q.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.v.bias": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.23.self_attn.v.weight": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.24.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.modulation": "diffusion_pytorch_model-00002-of-00004.safetensors",
+ "blocks.24.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.24.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.25.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.26.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.27.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.28.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.29.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.3.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.3.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.30.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.30.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.31.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.32.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.33.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.34.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.k_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.v_img.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.cross_attn.v_img.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.ffn.0.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.ffn.0.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.ffn.2.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.ffn.2.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.35.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.cross_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.k_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.cross_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.cross_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.v_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.cross_attn.v_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.ffn.0.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.ffn.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.ffn.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.ffn.2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.36.modulation": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.norm3.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.norm3.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.k.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.norm_k.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.norm_q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.o.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.o.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.q.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.q.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.v.bias": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.36.self_attn.v.weight": "diffusion_pytorch_model-00003-of-00004.safetensors",
+ "blocks.37.cross_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.k_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.v_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.cross_attn.v_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.ffn.0.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.ffn.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.ffn.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.ffn.2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.modulation": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.norm3.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.norm3.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.37.self_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.k_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.v_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.cross_attn.v_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.ffn.0.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.ffn.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.ffn.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.ffn.2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.modulation": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.norm3.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.norm3.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.38.self_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.k_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.v_img.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.cross_attn.v_img.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.ffn.0.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.ffn.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.ffn.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.ffn.2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.modulation": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.norm3.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.norm3.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.k.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.norm_k.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.norm_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.o.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.o.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.v.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.39.self_attn.v.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "blocks.4.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.4.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.5.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.6.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.7.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.8.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.k_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.norm_k_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.v_img.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.cross_attn.v_img.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.ffn.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.ffn.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.ffn.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.ffn.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.modulation": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.norm3.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.norm3.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.k.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.norm_k.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.norm_q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.o.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.o.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.q.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.q.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.v.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "blocks.9.self_attn.v.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.0.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.1.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.2.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.3.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.4.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.5.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.6.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.k_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.linear1_kv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.linear1_kv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.linear1_q.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.linear1_q.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.linear2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.linear2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_adapter.fuser_blocks.7.q_norm.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.conv1_local.conv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.conv1_local.conv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.conv2.conv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.conv2.conv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.conv3.conv.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.conv3.conv.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.out_proj.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.out_proj.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "face_encoder.padding_tokens": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "head.head.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "head.head.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "head.modulation": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.0.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.3.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.3.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.4.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "img_emb.proj.4.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.dec.direction.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.0.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.2.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.3.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.3.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.4.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.fc.4.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.0.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.0.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.1.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.2.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.3.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.4.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.5.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.6.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.conv1.0.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.conv1.1.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.conv2.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.conv2.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.conv2.2.bias": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.skip.0.kernel": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.7.skip.1.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "motion_encoder.enc.net_app.convs.8.weight": "diffusion_pytorch_model-00004-of-00004.safetensors",
+ "patch_embedding.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "patch_embedding.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "pose_patch_embedding.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "pose_patch_embedding.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "text_embedding.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "text_embedding.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "text_embedding.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "text_embedding.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "time_embedding.0.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "time_embedding.0.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "time_embedding.2.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "time_embedding.2.weight": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "time_projection.1.bias": "diffusion_pytorch_model-00001-of-00004.safetensors",
+ "time_projection.1.weight": "diffusion_pytorch_model-00001-of-00004.safetensors"
+ }
+}
diff --git a/google/umt5-xxl/special_tokens_map.json b/google/umt5-xxl/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14855e7052ffbb595057dfd791d293c1c940db2c
--- /dev/null
+++ b/google/umt5-xxl/special_tokens_map.json
@@ -0,0 +1,308 @@
+{
+ "additional_special_tokens": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "