Upload 20 files
- .gitattributes +1 -0
- added_tokens.json +1049 -0
- chat_template.jinja +52 -0
- config.json +281 -0
- configuration_intern_vit.py +118 -0
- configuration_yuan.py +46 -0
- configuration_yuanvl.py +95 -0
- conversation.py +399 -0
- flash_attention.py +76 -0
- generation_config.json +5 -0
- model.safetensors.index.json +0 -0
- modeling_intern_vit.py +366 -0
- modeling_yuanlm2.py +1624 -0
- modeling_yuanvl_chat.py +400 -0
- mq_test_demo.py +576 -0
- recipe.yaml +11 -0
- special_tokens_map.json +1085 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
- utils.py +144 -0
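
The listing above is the full set of files in this upload. As a rough sketch of how the whole snapshot could be pulled locally with the Hugging Face Hub client — the repository id below is a placeholder, since the actual repo id is not shown in this listing:

```python
# Sketch: download every file from this upload in one call.
# "org/yuan-vl-chat-w4a16" is a placeholder repo id, not the real one.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="org/yuan-vl-chat-w4a16",  # placeholder; replace with the actual repo
    revision="main",                   # or the specific commit of this upload
)
print(local_dir)  # path containing config.json, tokenizer.model, *.safetensors, ...
```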
.gitattributes
CHANGED
```diff
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
```
added_tokens.json
ADDED
@@ -0,0 +1,1049 @@
```json
{
  "</3dbox>": 134971,
  "</IMAGE>": 134962,
  "</box>": 134967,
  "</code_query>": 135987,
  "</code_result>": 135989,
  "</depth>": 134973,
  "</final_answer>": 135999,
  "</infer>": 135991,
  "</inferresult>": 135993,
  "</obj>": 134965,
  "</point>": 134969,
  "</search_query>": 135985,
  "</search_result>": 135983,
  "</think>": 135981,
  "</tool_calls>": 135995,
  "</tool_response>": 135997,
  "<3dbox>": 134970,
  "<BOS>": 134960,
  "<FIM_MIDDLE>": 134957,
  "<FIM_PREFIX>": 134956,
  "<FIM_SUFFIX>": 134955,
  "<IMAGE>": 134961,
  "<box>": 134966,
  "<code_query>": 135986,
  "<code_result>": 135988,
  "<depth>": 134972,
  "<eog>": 135975,
  "<eop>": 135974,
  "<file_sep>": 134959,
  "<final_answer>": 135998,
  "<grounding>": 134963,
  "<infer>": 135990,
  "<inferresult>": 135992,
  "<mask>": 134953,
  "<obj>": 134964,
  "<point>": 134968,
  "<predict>": 134954,
  "<repo_name>": 134958,
  "<search_query>": 135984,
  "<search_result>": 135982,
  "<think>": 135980,
  "<tool_calls>": 135994,
  "<tool_response>": 135996,
  "<|Assistant|>": 135979,
  "<|User|>": 135978,
  "<|begin_of_sentence|>": 135976,
  "<|end_of_sentence|>": 135977,
  "s000": 134974,
  "s001": 134975,
  ...
  "s998": 135972,
  "s999": 135973
}
```
The elided entries follow the same pattern: "s000" through "s999" map consecutively to token IDs 134974 through 135973, giving the 1049 added tokens counted in the hunk header.
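
A hedged sketch of how these added tokens are typically inspected once the tokenizer files in this upload are loaded; the repository path is a placeholder, and `trust_remote_code=True` is assumed because config.json points at a custom `YuanVLTokenizer`:

```python
# Sketch: inspect the added special tokens defined in added_tokens.json.
# "path/to/this/repo" is a placeholder for the downloaded snapshot directory.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo", trust_remote_code=True)

# Tokens added on top of the base vocabulary, e.g. <think>, <tool_calls>, s000..s999.
added = tok.get_added_vocab()
print(len(added))                                  # should cover the 1049 entries above
print(tok.convert_tokens_to_ids("<think>"))        # 135980 per added_tokens.json
print(tok.convert_tokens_to_ids("<|Assistant|>"))  # 135979 per added_tokens.json
```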
chat_template.jinja
ADDED
@@ -0,0 +1,52 @@
```jinja
{% for message in messages if message.role == 'user' and message.content is iterable and message.content is not string %}
{% for item in message.content if item.type == 'image' %}
{{- '<image>' -}}
{% endfor %}
{% endfor %}

{{- '<|begin_of_sentence|>' -}}

{%- set system_message = namespace(value=none) -%}
{%- for message in messages if message.role == 'system' -%}
{%- set system_message.value = message.content -%}
{%- endfor -%}
{%- if system_message.value -%}
{{- system_message.value -}}
{%- endif -%}

{%- for message in messages -%}
{%- if message.role == "user" -%}
{{- '<|User|>' -}}
{%- if message.content is string -%}
{{- message.content -}}
{%- elif message.content is iterable and message.content is not string -%}
{%- for item in message.content if item.type == "text" -%}
{{- item.text -}}
{%- endfor -%}
{%- endif -%}

{%- elif message.role == "assistant" -%}
{%- set thinking_tag = "" -%}
{%- if enable_thinking is defined -%}
{%- set thinking_tag = "</think>" if not enable_thinking else "<think>" -%}
{%- endif -%}
{{- '<|Assistant|>' + thinking_tag -}}

{%- if message.content is string -%}
{{- message.content -}}
{%- elif message.content is iterable and message.content is not string -%}
{%- for item in message.content if item.type == "text" -%}
{{- item.text -}}
{%- endfor -%}
{%- endif -%}

{{- '<|end_of_sentence|>' -}}
{%- endif -%}
{%- endfor -%}

{%- if add_generation_prompt -%}
{{- '<|Assistant|>' -}}
{%- if enable_thinking is defined -%}
{{- "</think>" if not enable_thinking else "<think>" -}}
{%- endif -%}
{%- endif -%}
```
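
A minimal sketch of rendering this template through `tokenizer.apply_chat_template`; the repo path is a placeholder, and passing `enable_thinking` as an extra template variable assumes a transformers version that forwards additional keyword arguments into the Jinja context:

```python
# Sketch: render the chat template above for a text+image user turn.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "image"},  # emitted as a leading <image> placeholder by the first loop
        {"type": "text", "text": "Describe the picture."},
    ]},
]

prompt = tok.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,   # appends <|Assistant|> (plus <think>/</think> if set)
    enable_thinking=True,         # picked up by the `enable_thinking is defined` branch
)
print(prompt)
# Expected, roughly:
# <image><|begin_of_sentence|>You are a helpful assistant.<|User|>Describe the picture.<|Assistant|><think>
```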
config.json
ADDED
@@ -0,0 +1,281 @@
```json
{
  "architectures": [
    "YuanVLChatModel"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_yuanvl.YuanVLChatConfig",
    "AutoModel": "modeling_yuanvl_chat.YuanVLChatModel",
    "AutoModelForCausalLM": "modeling_yuanvl_chat.YuanVLChatModel"
  },
  "clip_download_path": "path/to/internvit-448",
  "clip_model_name": "InternViT-448",
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "eos_token_id": 77185,
  "force_image_size": 448,
  "imagemlp_recompute": true,
  "img_context_token_id": 77188,
  "llm_config": {
    "_from_model_config": true,
    "architectures": [
      "YuanForCausalLM"
    ],
    "attention_projection_size": 4096,
    "attn_dropout": 0.0,
    "attn_mask_type": "causal",
    "auto_map": {
      "AutoConfig": "configuration_yuanvl.YuanConfig",
      "AutoModelForCausalLM": "yuanvl.YuanForCausalLM"
    },
    "bos_token": "<BOS>",
    "bos_token_id": 134960,
    "causal_mask": true,
    "dropout": 0,
    "eod_token": "<eod>",
    "eod_token_id": 77185,
    "ffn_hidden_size": 8192,
    "head_dim": 256,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "initializer_range": 0.02,
    "intermediate_size": 8192,
    "lf_conv2d_group": 1,
    "lf_conv2d_num_pad": 0,
    "mask_token_id": 77185,
    "max_position_embeddings": 131072,
    "model_max_length": 131072,
    "model_type": "yuan2",
    "moe_config": {
      "ffn_hidden_size": 8192,
      "gated_linear_unit": true,
      "moe_num_experts": 32,
      "moe_top_k": 2,
      "norm_topk_prob": true,
      "num_attention_router_heads": 1,
      "router_type": "attn_router"
    },
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "output_router_logits": true,
    "pad_token_id": 77188,
    "rep_token": "<rep>",
    "rep_token_id": 134962,
    "reset_attention_mask": false,
    "reset_position_ids": false,
    "rms_norm_eps": 1e-06,
    "rotary_base": 1000000,
    "rotary_percent": 0.5,
    "sep_token": "<sep>",
    "sep_token_id": 77187,
    "tie_word_embeddings": false,
    "tokenizer_class": "YuanVLTokenizer",
    "torch_dtype": "bfloat16",
    "use_bias": false,
    "use_cache": true,
    "use_flash_attention": true,
    "use_lf_gate": true,
    "use_lfa_bias": true,
    "use_loss_mask": false,
    "use_moe": true,
    "use_rope_scaling": false,
    "vocab_size": 136064
  },
  "max_dynamic_patch": 9,
  "min_dynamic_patch": 1,
  "model_type": "yuanvl",
  "output_attentions": false,
  "ps_version": "v2",
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "format": "pack-quantized",
        "input_activations": null,
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": 128,
          "num_bits": 4,
          "observer": "minmax",
          "observer_kwargs": {},
          "strategy": "group",
          "symmetric": true,
          "type": "int"
        }
      }
    },
    "format": "pack-quantized",
    "global_compression_ratio": null,
    "ignore": [
      ".encoder.layers.0.attn.qkv",
      ".encoder.layers.0.attn.proj",
      ".encoder.layers.0.mlp.fc1",
      ".encoder.layers.0.mlp.fc2",
      ...
      ".encoder.layers.23.attn.qkv",
      ".encoder.layers.23.attn.proj",
      ".encoder.layers.23.mlp.fc1",
      ".encoder.layers.23.mlp.fc2",
      "language_model.lm_head",
      "language_model.model.layers.0.mlp.router.query_key_value",
      ...
      "language_model.model.layers.23.mlp.router.query_key_value",
      "imagemlp.up_proj",
      "imagemlp.gate_proj",
      "imagemlp.down_proj"
    ],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "compressed",
    "sparsity_config": {},
    "transform_config": {},
    "version": "0.11.0"
  },
  "select_layer": -1,
  "template": "yuan-chat",
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 0,
  "use_cache": true,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "vision_config": {
    "architectures": [
      "InternVisionModel"
    ],
    "attention_dropout": 0.0,
    "drop_path_rate": 0.0,
    "dropout": 0.0,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "image_size": 448,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "layer_norm_eps": 1e-06,
    "model_type": "intern_vit_6b",
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "qk_normalization": false,
    "qkv_bias": true,
    "torch_dtype": "bfloat16",
    "use_bfloat16": true,
    "use_flash_attn": false
  }
}
```
The elided "ignore" entries enumerate the same four projections (attn.qkv, attn.proj, mlp.fc1, mlp.fc2) for every vision-encoder layer 0 through 23, and the MoE router query_key_value for every language-model layer 0 through 23.
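
A hedged loading sketch based on this config: `auto_map` routes `AutoModelForCausalLM` to the custom `YuanVLChatModel`, and `quantization_config` declares 4-bit, group-size-128, symmetric int weights in the compressed-tensors "pack-quantized" format, with the vision encoder, MoE routers, lm_head and image MLP left unquantized via `ignore`. The path is a placeholder, and actually dequantizing the weights requires a compressed-tensors-aware runtime, which is assumed here:

```python
# Sketch: load the checkpoint described by config.json.
# Assumes a runtime that understands compressed-tensors "pack-quantized" weights.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "path/to/this/repo",          # placeholder for the downloaded snapshot
    trust_remote_code=True,       # resolves modeling_yuanvl_chat.YuanVLChatModel via auto_map
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16" above
)
print(type(model).__name__)       # expected: YuanVLChatModel
```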
configuration_intern_vit.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use the flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_6b'

    def __init__(
            self,
            num_channels=3,
            patch_size=14,
            image_size=224,
            qkv_bias=False,
            hidden_size=3200,
            num_attention_heads=25,
            intermediate_size=12800,
            qk_normalization=True,
            num_hidden_layers=48,
            use_flash_attn=True,
            hidden_act='gelu',
            norm_type='rms_norm',
            layer_norm_eps=1e-6,
            dropout=0.0,
            drop_path_rate=0.0,
            attention_dropout=0.0,
            initializer_range=0.02,
            initializer_factor=0.1,
            **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
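A minimal usage sketch (illustrative, not part of the uploaded files), assuming configuration_intern_vit.py is importable from the working directory; the override values are arbitrary examples.

from configuration_intern_vit import InternVisionConfig

vision_cfg = InternVisionConfig(image_size=448, use_flash_attn=False)  # example overrides
print(vision_cfg.model_type)   # 'intern_vit_6b'
print(vision_cfg.hidden_size)  # 3200 (default)
print(vision_cfg.image_size)   # 448 (overridden)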
configuration_yuan.py
ADDED
@@ -0,0 +1,46 @@

from transformers.configuration_utils import PretrainedConfig


class YuanConfig(PretrainedConfig):
    model_type = "yuan2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=135040,
        hidden_size=2048,
        ffn_hidden_size=8192,
        intermediate_size=8192,
        num_hidden_layers=24,
        num_attention_heads=32,
        hidden_act="silu",
        model_max_length=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=77185,
        bos_token_id=77185,
        eos_token_id=77185,
        tie_word_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.model_max_length = model_max_length
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
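A minimal usage sketch (illustrative, not part of the uploaded files), assuming configuration_yuan.py is importable directly; the tiny override values are arbitrary.

from configuration_yuan import YuanConfig

llm_cfg = YuanConfig(num_hidden_layers=2, hidden_size=512)  # arbitrary small overrides
print(llm_cfg.model_type)           # 'yuan2'
print(llm_cfg.eos_token_id)         # 77185, passed through super().__init__
print(llm_cfg.tie_word_embeddings)  # True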
configuration_yuanvl.py
ADDED
@@ -0,0 +1,95 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import copy

from transformers import AutoConfig, LlamaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig
from .configuration_yuan import YuanConfig

logger = logging.get_logger(__name__)


class YuanVLChatConfig(PretrainedConfig):
    model_type = 'yuanvl'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version='v1',
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            img_context_token_id=77188,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['YuanForCausalLM']}
            logger.info('llm_config is None. Initializing the YuanForCausalLM config with default values (`YuanForCausalLM`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config.get('architectures')[0] == 'YuanForCausalLM':
            self.llm_config = YuanConfig(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        self.img_context_token_id = img_context_token_id

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
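A minimal usage sketch (illustrative, not part of the uploaded files). Because this module uses relative imports, the sketch assumes the uploaded files sit in a hypothetical local package directory `yuanvl/` with an empty __init__.py; nested dicts are promoted to InternVisionConfig / YuanConfig inside __init__.

from yuanvl.configuration_yuanvl import YuanVLChatConfig

cfg = YuanVLChatConfig(
    vision_config={'architectures': ['InternVisionModel'], 'image_size': 448},
    llm_config={'architectures': ['YuanForCausalLM'], 'num_hidden_layers': 24},
    dynamic_image_size=True,
)
print(type(cfg.vision_config).__name__)   # InternVisionConfig
print(type(cfg.llm_config).__name__)      # YuanConfig
print(cfg.to_dict()['downsample_ratio'])  # 0.5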
conversation.py
ADDED
@@ -0,0 +1,399 @@
"""
Conversation prompt templates.
We kindly request that you import fastchat instead of copying this file if you wish to use it.
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""

import dataclasses
from enum import IntEnum, auto
from typing import Dict, List, Tuple, Union


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()
    INTERNVL_ZH = auto()
    MPT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = '{system_message}'
    # The system message
    system_message: str = ''
    # The names of two roles
    roles: Tuple[str] = ('USER', 'ASSISTANT')
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = '\n'
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ': '  # must end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = '' if system_prompt == '' else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ': '
                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
                    )
                    ret += '\n\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = '[INST] '
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + ' '
                    else:
                        ret += tag + ' ' + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == 'chatglm2' else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ''

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'

                if message:
                    ret += f'{role}:{message}{self.sep}'
                else:
                    ret += f'{role}:'
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep + '\n'
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ''
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + ' ' + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                # if i % 2 == 0:
                #     ret += "<s>"
                if message:
                    ret += role + ':' + message + seps[i % 2] + '\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ':\n' + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += '\n\n'
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + '<s>' + message + '</s>'
                else:
                    ret += role + ': ' + '<s>'
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ':\n' + message + self.sep
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ''
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'

            return ret
        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
            seps = [self.sep, self.sep2]
            ret = self.system_message + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.
        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{'role': 'system', 'content': self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                ret.append({'role': 'user', 'content': msg})
            else:
                if msg is not None:
                    ret.append({'role': 'assistant', 'content': msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            'template_name': self.name,
            'system_message': self.system_message,
            'roles': self.roles,
            'messages': self.messages,
            'offset': self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f'{template.name} has been registered.'

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
# is that during training, the preprocessing function for the Hermes-2 template doesn't add
# <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
# Therefore, they are completely equivalent during inference.
register_conv_template(
    Conversation(
        name='Hermes-2',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
        stop_str='<|endoftext|>',
    )
)


register_conv_template(
    Conversation(
        name='internlm2-chat',
        system_template='<|im_start|>system\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>',
    )
)


register_conv_template(
    Conversation(
        name='phi3-chat',
        system_template='<|system|>\n{system_message}',
        # note: The new system prompt was not used here to avoid changes in benchmark performance.
        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
        roles=('<|user|>\n', '<|assistant|>\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|end|>',
    )
)


register_conv_template(
    Conversation(
        name='internvl2_5',
        system_template='<|im_start|>system\n{system_message}',
        system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>\n',
    )
)

register_conv_template(
    Conversation(
        name='yuan-chat',
        system_template='<|im_start|>system\n{system_message}',
        system_message='你是IEI-源多模态模型,英文名是YuanVL,是由浪潮信息开发的多模态大语言模型。',
        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
        sep_style=SeparatorStyle.MPT,
        sep='<|im_end|>\n',
    )
)
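A minimal usage sketch (illustrative, not part of the uploaded files) of the 'yuan-chat' template registered above; conversation.py has no relative imports, so it can be imported directly. The '<image>' placeholder is an assumption about how the surrounding chat code inserts image tokens, not something defined in this file.

from conversation import get_conv_template

conv = get_conv_template('yuan-chat')       # returns a fresh copy of the registered template
conv.append_message(conv.roles[0], '<image>\nDescribe this picture.')
conv.append_message(conv.roles[1], None)    # empty assistant slot, filled in after generation
prompt = conv.get_prompt()
# MPT-style layout: system block + '<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n'
print(prompt)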
flash_attention.py
ADDED
@@ -0,0 +1,76 @@
# https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
import torch
import torch.nn as nn
from einops import rearrange

try:  # v1
    from flash_attn.flash_attn_interface import \
        flash_attn_unpadded_qkvpacked_func
except:  # v2
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

from flash_attn.bert_padding import pad_input, unpad_input


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                 if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None
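A minimal usage sketch (illustrative, not part of the uploaded files). It assumes the flash-attn package and a CUDA device are available; forward() asserts that the packed QKV tensor is fp16/bf16 and on GPU. The shape below uses 25 heads of dim 128, matching the 3200-dim vision encoder.

import torch
from flash_attention import FlashAttention

attn = FlashAttention(attention_dropout=0.0).eval()
qkv = torch.randn(2, 1025, 3, 25, 128, dtype=torch.bfloat16, device='cuda')  # (B, S, 3, H, D)
with torch.no_grad():
    out, _ = attn(qkv, causal=False)
print(out.shape)  # torch.Size([2, 1025, 25, 128])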
generation_config.json
ADDED
@@ -0,0 +1,5 @@
{
  "_from_model_config": true,
  "eos_token_id": 77185,
  "transformers_version": "4.55.2"
}
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
See raw diff
modeling_intern_vit.py
ADDED
@@ -0,0 +1,366 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig
#try:
from .flash_attention import FlashAttention
has_flash_attn = True
#except:
#    print('FlashAttention is not installed.')
#    has_flash_attn = False


logger = logging.get_logger(__name__)


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        self.use_flash_attn = True  # modify
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """

        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = True

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs
            #import pdb
            #pdb.set_trace()

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
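A minimal usage sketch (illustrative, not part of the uploaded files). The module uses relative imports and imports FlashAttention unconditionally, and InternAttention hard-codes use_flash_attn = True, so the sketch assumes a hypothetical local package `yuanvl/`, an installed flash-attn, and a CUDA device; the tiny config is randomly initialised, not the released checkpoint.

import torch
from yuanvl.configuration_intern_vit import InternVisionConfig
from yuanvl.modeling_intern_vit import InternVisionModel

cfg = InternVisionConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128,
                         num_hidden_layers=2)  # deliberately tiny
model = InternVisionModel(cfg).to(device='cuda', dtype=torch.bfloat16).eval()
pixel_values = torch.randn(1, 3, 224, 224, device='cuda', dtype=torch.bfloat16)
with torch.no_grad():
    out = model(pixel_values=pixel_values)
print(out.last_hidden_state.shape)  # torch.Size([1, 257, 64]): CLS token + (224 // 14) ** 2 patches
print(out.pooler_output.shape)      # torch.Size([1, 64])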
modeling_yuanlm2.py
ADDED
@@ -0,0 +1,1624 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
| 5 |
+
# and OPT implementations in this library. It has been modified from its
|
| 6 |
+
# original forms to accommodate minor architectural differences compared
|
| 7 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
| 8 |
+
#
|
| 9 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 10 |
+
# you may not use this file except in compliance with the License.
|
| 11 |
+
# You may obtain a copy of the License at
|
| 12 |
+
#
|
| 13 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 14 |
+
#
|
| 15 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 16 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 17 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 18 |
+
# See the License for the specific language governing permissions and
|
| 19 |
+
# limitations under the License.
|
| 20 |
+
""" PyTorch Yuan model."""
|
| 21 |
+
import math
|
| 22 |
+
from typing import List, Optional, Tuple, Union
|
| 23 |
+
import torch.nn.functional as F
|
| 24 |
+
import torch
|
| 25 |
+
import torch.utils.checkpoint
|
| 26 |
+
from torch import einsum, nn
|
| 27 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 28 |
+
from transformers.activations import ACT2FN
|
| 29 |
+
from transformers.generation import GenerationMixin
|
| 30 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
|
| 31 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 32 |
+
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
|
| 33 |
+
from .configuration_yuan import YuanConfig
|
| 34 |
+
from einops import rearrange
|
| 35 |
+
# from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
|
| 36 |
+
#from apex.normalization import MixedFusedRMSNorm as RMSNorm
|
| 37 |
+
#from flash_attn import flash_attn_func
|
| 38 |
+
#from transformer_engine.pytorch import RMSNorm
|
| 39 |
+
import pdb
|
| 40 |
+
import copy
|
| 41 |
+
try:
|
| 42 |
+
import grouped_gemm as gg
|
| 43 |
+
except ImportError:
|
| 44 |
+
gg = None
|
| 45 |
+
try:
|
| 46 |
+
from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
|
| 47 |
+
from flash_attn import flash_attn_func
|
| 48 |
+
except ImportError:
|
| 49 |
+
flash_attn_unpadded_func = None
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
logger = logging.get_logger(__name__)
|
| 53 |
+
|
| 54 |
+
_CONFIG_FOR_DOC = "YuanConfig"
|
| 55 |
+
|
| 56 |
+
class RMSNorm(torch.nn.Module):
|
| 57 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 58 |
+
super().__init__()
|
| 59 |
+
self.weight = torch.nn.Parameter(torch.ones(hidden_size))
|
| 60 |
+
self.variance_epsilon = eps
|
| 61 |
+
|
| 62 |
+
def forward(self, hidden_states):
|
| 63 |
+
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
|
| 64 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 65 |
+
|
| 66 |
+
# convert into half-precision if necessary
|
| 67 |
+
if self.weight.dtype in [torch.float16, torch.bfloat16]:
|
| 68 |
+
hidden_states = hidden_states.to(self.weight.dtype)
|
| 69 |
+
|
| 70 |
+
return self.weight * hidden_states
|
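As a quick illustration of what the RMSNorm module above computes, here is a minimal standalone sketch (illustrative only, not part of the uploaded file; the shapes are made up). It normalizes each hidden vector by the reciprocal root-mean-square of its elements and then applies the learned scale:

# Minimal RMSNorm sketch (assumed shapes, CPU, float32 only)
import torch

def rms_norm(x, weight, eps=1e-6):
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * torch.rsqrt(variance + eps))

x = torch.randn(2, 4, 8)      # (batch, seq, hidden)
w = torch.ones(8)             # learned scale, initialized to ones as in the module
print(rms_norm(x, w).shape)   # torch.Size([2, 4, 8])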
| 71 |
+
|
| 72 |
+
|
| 73 |
+
"""
|
| 74 |
+
class YuanRotaryEmbedding(nn.Module):
|
| 75 |
+
def __init__(self, dim, base=10000, dtype=torch.float32, device=None, scaling_factor=1.0, rope_type='default'):
|
| 76 |
+
super().__init__()
|
| 77 |
+
inv_freq = (1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))).to(dtype)#.to('cuda:1')
|
| 78 |
+
self.register_buffer('inv_freq', inv_freq)
|
| 79 |
+
|
| 80 |
+
def forward(self, max_seq_len, offset=0):
|
| 81 |
+
self.inv_freq = self.inv_freq.to(torch.float32)
|
| 82 |
+
seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
|
| 83 |
+
freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
|
| 84 |
+
# first part even vector components, second part odd vector components,
|
| 85 |
+
# 2 * dim in dimension size
|
| 86 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 87 |
+
# emb [seq_length, .., dim]
|
| 88 |
+
return emb[:, None, None, :]"""
|
| 89 |
+
|
| 90 |
+
class YuanRotaryEmbedding(nn.Module):
|
| 91 |
+
def __init__(self, dim, base=10000, dtype=torch.float32, rotary_interleaved=False, seq_len_interpolation_factor=None):
|
| 92 |
+
super().__init__()
|
| 93 |
+
self.base = base
|
| 94 |
+
self.dim = dim
|
| 95 |
+
self.rotary_interleaved = rotary_interleaved
|
| 96 |
+
self.seq_len_interpolation_factor = seq_len_interpolation_factor
|
| 97 |
+
|
| 98 |
+
def get_rotary_seq_len(
|
| 99 |
+
self,
|
| 100 |
+
inference_param=None,
|
| 101 |
+
transformer_input: torch.Tensor=None,
|
| 102 |
+
transformer_config=None,
|
| 103 |
+
):
|
| 104 |
+
if inference_param is not None:
|
| 105 |
+
rotary_seq_len = inference_param.max_sequence_length
|
| 106 |
+
else:
|
| 107 |
+
rotary_seq_len = transformer_input.size(0)
|
| 108 |
+
if transformer_config.sequence_parallel:
|
| 109 |
+
rotary_seq_len *= transformer_config.tensor_model_parallel_size
|
| 110 |
+
|
| 111 |
+
return rotary_seq_len
|
| 112 |
+
|
| 113 |
+
def forward(self, max_seq_len, offset=0):
|
| 114 |
+
|
| 115 |
+
"""Forward pass of RoPE embedding.
|
| 116 |
+
|
| 117 |
+
Args:
|
| 118 |
+
max_seq_len (int): Maximum size of sequence
|
| 119 |
+
offset (int, optional): Offset added to the position indices. Defaults to 0.
|
| 120 |
+
|
| 121 |
+
Returns:
|
| 122 |
+
Tensor: Embeddings after applying RoPE.
|
| 123 |
+
"""
|
| 124 |
+
inv_freq = (1.0 / ( self.base**(torch.arange(0, self.dim, 2, dtype=torch.float32, device=torch.cuda.current_device()) / self.dim))).to(torch.float32)
|
| 125 |
+
|
| 126 |
+
#max_seq_len_int = max_seq_len.item() if max_seq_len.numel() == 1 else max_seq_len.max().item()
|
| 127 |
+
seq = (
|
| 128 |
+
torch.arange(max_seq_len, device=inv_freq.device, dtype=inv_freq.dtype)
|
| 129 |
+
+ offset
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
if self.seq_len_interpolation_factor is not None:
|
| 133 |
+
seq *= 1 / self.seq_len_interpolation_factor
|
| 134 |
+
|
| 135 |
+
freqs = torch.outer(seq, inv_freq)
|
| 136 |
+
# first part even vector components, second part odd vector components,
|
| 137 |
+
# 2 * dim in dimension size
|
| 138 |
+
if not self.rotary_interleaved:
|
| 139 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 140 |
+
else:
|
| 141 |
+
emb = torch.stack((freqs.view(-1, 1), freqs.view(-1, 1)), dim=-1).view(
|
| 142 |
+
freqs.shape[0], -1
|
| 143 |
+
)
|
| 144 |
+
# emb [seq_length, .., dim]
|
| 145 |
+
emb = emb[:, None, None, :]
|
| 146 |
+
#emb = emb[:, None, :]
|
| 147 |
+
return emb
|
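The forward pass above builds a table of rotary angles from the inverse frequencies. The following sketch (illustrative only, not part of the uploaded file) reproduces the non-interleaved branch on CPU with made-up sizes:

# Rotary-angle table sketch (assumed dim/base/seq values)
import torch

dim, base, max_seq_len = 8, 10000, 5
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
seq = torch.arange(max_seq_len, dtype=torch.float32)
freqs = torch.outer(seq, inv_freq)        # (max_seq_len, dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)   # (max_seq_len, dim): both halves share the same angles
print(emb[:, None, None, :].shape)        # torch.Size([5, 1, 1, 8]), the layout returned above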
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def _rotate_half(x, rotary_interleaved):
|
| 151 |
+
"""huggingface version
|
| 152 |
+
change sign so the last dimension becomes [-odd, +even]
|
| 153 |
+
|
| 154 |
+
x1, x2 = torch.chunk(x, 2, dim=-1)
|
| 155 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 156 |
+
"""
|
| 157 |
+
if not rotary_interleaved:
|
| 158 |
+
x1, x2 = torch.chunk(x, 2, dim=-1)
|
| 159 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 160 |
+
else:
|
| 161 |
+
x1 = x[:, :, :, ::2]
|
| 162 |
+
x2 = x[:, :, :, 1::2]
|
| 163 |
+
x_new = torch.stack((-x2, x1), dim=-1)
|
| 164 |
+
return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1)
|
| 165 |
+
|
| 166 |
+
def apply_rotary_pos_emb(t, freqs, position_ids, rotary_interleaved=False):
|
| 167 |
+
|
| 168 |
+
rot_dim = freqs.shape[-1]
|
| 169 |
+
#if position_ids.shape[1] > 1:
|
| 170 |
+
freqs = freqs[position_ids]
|
| 171 |
+
freqs = freqs.view(t.shape[1],freqs.shape[1],freqs.shape[2],freqs.shape[4]).transpose(0,1)
|
| 172 |
+
# ideally t_pass is empty so rotary pos embedding is applied to all tensor t
|
| 173 |
+
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
|
| 174 |
+
|
| 175 |
+
# first part is cosine component
|
| 176 |
+
# second part is sine component, need to change signs with _rotate_half method
|
| 177 |
+
t_type = t.dtype
|
| 178 |
+
cos_ = torch.cos(freqs).to(t.dtype)
|
| 179 |
+
sin_ = torch.sin(freqs).to(t.dtype)
|
| 180 |
+
|
| 181 |
+
t = (t * cos_) + (_rotate_half(t, rotary_interleaved) * sin_)
|
| 182 |
+
return torch.cat((t, t_pass), dim=-1)
|
| 183 |
+
"""huggingface version
|
| 184 |
+
input tensor t is of shape [seq_length, ..., dim]
|
| 185 |
+
rotary positional embedding tensor freqs is of shape [seq_length, ..., dim]
|
| 186 |
+
check https://kexue.fm/archives/8265 for detailed formulas
|
| 187 |
+
|
| 188 |
+
dtype = t.dtype
|
| 189 |
+
rot_dim = freqs.shape[-1]
|
| 190 |
+
t_pass = t[..., rot_dim:]
|
| 191 |
+
if position_ids.shape[1] > 1:
|
| 192 |
+
freqs = freqs[position_ids]
|
| 193 |
+
freqs = freqs.view(t.shape[1],freqs.shape[1],freqs.shape[2],freqs.shape[4]).transpose(0,1)
|
| 194 |
+
# ideally t_pass is empty so rotary pos embedding is applied to all tensor t
|
| 195 |
+
t = t[..., :rot_dim]
|
| 196 |
+
# first part is cosine component
|
| 197 |
+
# second part is sine component, need to change signs with _rotate_half method
|
| 198 |
+
t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
|
| 199 |
+
t = t.to(dtype)
|
| 200 |
+
"""
|
| 201 |
+
|
| 202 |
+
return torch.cat((t, t_pass), dim=-1)
|
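apply_rotary_pos_emb relies on the rotate-half trick to turn the cos/sin table into a rotation of each query/key vector. A minimal sketch of the non-interleaved case (illustrative only, not part of the uploaded file; shapes are made up):

# Rotate-half sketch (assumed shapes)
import torch

def rotate_half(x):
    x1, x2 = torch.chunk(x, 2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

t = torch.randn(3, 1, 2, 8)       # (seq, batch, heads, head_dim)
freqs = torch.randn(3, 1, 1, 8)   # rotary angles broadcast over the head dimension
rotated = t * freqs.cos() + rotate_half(t) * freqs.sin()
print(rotated.shape)              # torch.Size([3, 1, 2, 8])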
| 203 |
+
|
| 204 |
+
class LocalizedFiltering(torch.nn.Module):
|
| 205 |
+
"""
|
| 206 |
+
Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of
|
| 207 |
+
variable names and moving away from the stateful representation of incremental decoding state. See
|
| 208 |
+
"https://arxiv.org/abs/2209.10655" for more details.
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
def __init__(self, hidden_size, lf_conv2d_group, lf_conv2d_num_pad):
|
| 212 |
+
super().__init__()
|
| 213 |
+
|
| 214 |
+
self.embed_dim = hidden_size
|
| 215 |
+
self.lf_conv2d_group = lf_conv2d_group
|
| 216 |
+
self.lf_conv2d_num_pad = lf_conv2d_num_pad
|
| 217 |
+
if self.lf_conv2d_num_pad == 1:
|
| 218 |
+
self.training = True
|
| 219 |
+
self.conv1 = torch.nn.Conv2d(self.embed_dim, self.embed_dim // 2, (2, 1), stride=(1, 1), padding=(self.lf_conv2d_num_pad, 0), groups=self.lf_conv2d_group)
|
| 220 |
+
self.conv2 = torch.nn.Conv2d(self.embed_dim // 2, self.embed_dim, (2, 1), stride=(1, 1), padding=(self.lf_conv2d_num_pad, 0), groups=self.lf_conv2d_group)
|
| 221 |
+
self.output_layernorm = RMSNorm(self.embed_dim, eps=1e-6)
|
| 222 |
+
|
| 223 |
+
def _train_forward(self, inputs):
|
| 224 |
+
inputs = inputs.transpose(0,1)
|
| 225 |
+
seq_len, bsz, embed_dim = inputs.size()
|
| 226 |
+
if embed_dim != self.embed_dim:
|
| 227 |
+
raise ValueError(
|
| 228 |
+
f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
|
| 229 |
+
)
|
| 230 |
+
residual = inputs
|
| 231 |
+
|
| 232 |
+
inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
|
| 233 |
+
output1 = self.conv1(inputs)
|
| 234 |
+
output1 = output1[:, :, :seq_len, :]
|
| 235 |
+
|
| 236 |
+
output2 = self.conv2(output1)
|
| 237 |
+
output2 = output2[:, :, :seq_len, :].permute(2, 3, 0, 1).contiguous()
|
| 238 |
+
output2 = output2.view(seq_len, bsz, embed_dim)
|
| 239 |
+
assert output2.shape == residual.shape
|
| 240 |
+
|
| 241 |
+
torch.cuda.set_device(output2.device)
|
| 242 |
+
lf_output = self.output_layernorm(output2 + residual)
|
| 243 |
+
lf_output = lf_output.transpose(0,1)
|
| 244 |
+
return lf_output
|
| 245 |
+
|
| 246 |
+
def _inference_forward(self, inputs, before_hidden_states):
|
| 247 |
+
|
| 248 |
+
if before_hidden_states is None:
|
| 249 |
+
residual = inputs
|
| 250 |
+
seq_len, bsz, embed_dim = inputs.size()
|
| 251 |
+
|
| 252 |
+
inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
|
| 253 |
+
|
| 254 |
+
pad_zero1 = torch.zeros(bsz, embed_dim, 1, 1).to(inputs)
|
| 255 |
+
inputs = torch.cat((pad_zero1, inputs), dim=2).contiguous()
|
| 256 |
+
output1 = self.conv1(inputs)
|
| 257 |
+
|
| 258 |
+
pad_zero2 = torch.zeros(bsz, embed_dim // 2, 1, 1).to(output1)
|
| 259 |
+
output1 = torch.cat((pad_zero2, output1), dim=2).contiguous()
|
| 260 |
+
output2 = self.conv2(output1)
|
| 261 |
+
|
| 262 |
+
output2 = output2.permute(2, 3, 0, 1).contiguous()
|
| 263 |
+
|
| 264 |
+
output2 = output2.view(seq_len, bsz, embed_dim)
|
| 265 |
+
|
| 266 |
+
assert output2.shape == residual.shape
|
| 267 |
+
|
| 268 |
+
lf_output = self.output_layernorm(output2 + residual)
|
| 269 |
+
|
| 270 |
+
else:
|
| 271 |
+
residual = inputs
|
| 272 |
+
|
| 273 |
+
seq_len, bsz, embed_dim = inputs.size()
|
| 274 |
+
seq_len_before, _, _ = before_hidden_states.size()
|
| 275 |
+
|
| 276 |
+
assert seq_len == 1 and seq_len_before == 2
|
| 277 |
+
|
| 278 |
+
inputs = torch.cat((before_hidden_states, inputs), dim=0)
|
| 279 |
+
inputs = inputs.view(3, 1, bsz, embed_dim).permute(2, 3, 0, 1)
|
| 280 |
+
|
| 281 |
+
output1 = self.conv1(inputs)
|
| 282 |
+
output2 = self.conv2(output1)
|
| 283 |
+
output2 = output2.view(1, bsz, embed_dim)
|
| 284 |
+
|
| 285 |
+
assert output2.shape == residual.shape
|
| 286 |
+
|
| 287 |
+
lf_output = self.output_layernorm(output2 + residual)
|
| 288 |
+
|
| 289 |
+
return lf_output
|
| 290 |
+
'''#IEIyuan huggingface version
|
| 291 |
+
if before_hidden_states == None:
|
| 292 |
+
inputs = inputs.transpose(0,1)
|
| 293 |
+
seq_len, bsz, embed_dim = inputs.size()
|
| 294 |
+
if embed_dim != self.embed_dim:
|
| 295 |
+
raise ValueError(
|
| 296 |
+
f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
|
| 297 |
+
)
|
| 298 |
+
residual = inputs
|
| 299 |
+
inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
|
| 300 |
+
inputs = torch.cat((torch.zeros(bsz, embed_dim, 1, 1, dtype=inputs.dtype, device=inputs.device), inputs), dim=2).contiguous()
|
| 301 |
+
output1 = self.conv1(inputs)
|
| 302 |
+
|
| 303 |
+
output1 = torch.cat((torch.zeros(bsz, embed_dim // 2, 1, 1, dtype=inputs.dtype, device=inputs.device), output1), dim=2).contiguous()
|
| 304 |
+
output2 = self.conv2(output1).permute(2, 3, 0, 1).contiguous()
|
| 305 |
+
output2 = output2.view(seq_len, bsz, embed_dim)
|
| 306 |
+
assert output2.shape == residual.shape
|
| 307 |
+
norm_input = (output2 + residual)#.to('cuda:0')
|
| 308 |
+
torch.cuda.set_device(norm_input.device)
|
| 309 |
+
lf_output = self.output_layernorm(norm_input)
|
| 310 |
+
lf_output = lf_output#.to('cuda:1')
|
| 311 |
+
lf_output = lf_output.transpose(0,1)
|
| 312 |
+
return lf_output
|
| 313 |
+
else:
|
| 314 |
+
inputs = inputs.transpose(0,1)
|
| 315 |
+
before_hidden_states = before_hidden_states.transpose(0,1)
|
| 316 |
+
seq_len, bsz, embed_dim = inputs.size()
|
| 317 |
+
if embed_dim != self.embed_dim:
|
| 318 |
+
raise ValueError(
|
| 319 |
+
f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
|
| 320 |
+
)
|
| 321 |
+
residual = inputs
|
| 322 |
+
inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
|
| 323 |
+
before_hidden_states = before_hidden_states.view(2, 1, bsz, embed_dim).permute(2, 3, 0, 1)
|
| 324 |
+
inputs = torch.cat((before_hidden_states, inputs), dim=2).contiguous()
|
| 325 |
+
output1 = self.conv1(inputs)
|
| 326 |
+
output2 = self.conv2(output1).permute(2, 3, 0, 1).contiguous()
|
| 327 |
+
output2 = output2.view(seq_len, bsz, embed_dim)
|
| 328 |
+
assert output2.shape == residual.shape
|
| 329 |
+
|
| 330 |
+
norm_input = (output2 + residual)#.to('cuda:0')
|
| 331 |
+
torch.cuda.set_device(norm_input.device)
|
| 332 |
+
lf_output = self.output_layernorm(norm_input)
|
| 333 |
+
lf_output = lf_output#.to('cuda:1')
|
| 334 |
+
lf_output = lf_output.transpose(0,1)
|
| 335 |
+
return lf_output
|
| 336 |
+
'''
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def forward(
|
| 340 |
+
self,
|
| 341 |
+
inputs,
|
| 342 |
+
before_hidden_states = None,
|
| 343 |
+
) -> torch.Tensor:
|
| 344 |
+
# assert self.lf_conv2d_num_pad == 1
|
| 345 |
+
if self.training:
|
| 346 |
+
lf_output = self._train_forward(inputs)
|
| 347 |
+
else:
|
| 348 |
+
lf_output = self._inference_forward(inputs, before_hidden_states)
|
| 349 |
+
|
| 350 |
+
return lf_output
|
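Despite the Mega EMA wording in its docstring, LocalizedFiltering above applies two small causal convolutions along the sequence dimension and trims the padded future step. A shape-only sketch of the training path (illustrative only, not part of the uploaded file; sizes and group count are made up):

# LocalizedFiltering shape sketch (assumed sizes, groups=1)
import torch

seq_len, bsz, embed_dim = 6, 2, 8
conv1 = torch.nn.Conv2d(embed_dim, embed_dim // 2, (2, 1), stride=(1, 1), padding=(1, 0))
conv2 = torch.nn.Conv2d(embed_dim // 2, embed_dim, (2, 1), stride=(1, 1), padding=(1, 0))

x = torch.randn(seq_len, bsz, embed_dim)
x = x.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)   # (bsz, embed_dim, seq_len, 1)
out1 = conv1(x)[:, :, :seq_len, :]                           # drop the extra padded step
out2 = conv2(out1)[:, :, :seq_len, :]                        # back to seq_len positions
print(out2.shape)                                            # torch.Size([2, 8, 6, 1])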
| 351 |
+
|
| 352 |
+
|
| 353 |
+
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
|
| 354 |
+
def _make_causal_mask(
|
| 355 |
+
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
| 356 |
+
):
|
| 357 |
+
"""
|
| 358 |
+
Make causal mask used for bi-directional self-attention.
|
| 359 |
+
"""
|
| 360 |
+
bsz, tgt_len = input_ids_shape
|
| 361 |
+
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
|
| 362 |
+
mask_cond = torch.arange(mask.size(-1), device=device)
|
| 363 |
+
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
| 364 |
+
mask = mask.to(dtype)
|
| 365 |
+
|
| 366 |
+
if past_key_values_length > 0:
|
| 367 |
+
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
| 368 |
+
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# Copied from transformers.models.bart.modeling_bart._expand_mask
|
| 372 |
+
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
| 373 |
+
"""
|
| 374 |
+
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
| 375 |
+
"""
|
| 376 |
+
bsz, src_len = mask.size()
|
| 377 |
+
tgt_len = tgt_len if tgt_len is not None else src_len
|
| 378 |
+
|
| 379 |
+
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
| 380 |
+
|
| 381 |
+
inverted_mask = 1.0 - expanded_mask
|
| 382 |
+
|
| 383 |
+
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
|
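The two mask helpers above produce additive masks: allowed positions get 0 and blocked positions get the most negative value of the dtype. A tiny illustration for _make_causal_mask with tgt_len=3 and no cached tokens (illustrative only, not part of the uploaded file):

# Causal mask sketch (assumed tgt_len=3, float32)
import torch

dtype, tgt_len = torch.float32, 3
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
cond = torch.arange(tgt_len)
mask.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
print(mask)   # zeros on and below the diagonal, dtype-min above it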
| 384 |
+
|
| 385 |
+
|
| 386 |
+
class YuanRMSNorm(nn.Module):
|
| 387 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 388 |
+
"""
|
| 389 |
+
YuanRMSNorm is equivalent to LlamaRMSNorm
|
| 390 |
+
"""
|
| 391 |
+
super().__init__()
|
| 392 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 393 |
+
self.variance_epsilon = eps
|
| 394 |
+
|
| 395 |
+
def forward(self, hidden_states):
|
| 396 |
+
input_dtype = hidden_states.dtype
|
| 397 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 398 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 399 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 400 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 401 |
+
|
| 402 |
+
# flash attn
|
| 403 |
+
class FlashSelfAttention(torch.nn.Module):
|
| 404 |
+
"""Implement the scaled dot product attention with softmax.
|
| 405 |
+
Arguments
|
| 406 |
+
---------
|
| 407 |
+
softmax_scale: The temperature to use for the softmax attention.
|
| 408 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 409 |
+
runtime)
|
| 410 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 411 |
+
(default: 0.0)
|
| 412 |
+
"""
|
| 413 |
+
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
|
| 414 |
+
device=None, dtype=None):
|
| 415 |
+
super().__init__()
|
| 416 |
+
assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
|
| 417 |
+
'e.g., with pip install flash-attn')
|
| 418 |
+
assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
|
| 419 |
+
self.causal = causal
|
| 420 |
+
self.softmax_scale = softmax_scale
|
| 421 |
+
self.dropout_p = attention_dropout
|
| 422 |
+
|
| 423 |
+
def forward(self, q, k, v):
|
| 424 |
+
"""Implements the multihead softmax attention.
|
| 425 |
+
Arguments
|
| 426 |
+
---------
|
| 427 |
+
q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
|
| 428 |
+
"""
|
| 429 |
+
|
| 430 |
+
assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q,k,v)))
|
| 431 |
+
assert all((i.is_cuda for i in (q,k,v)))
|
| 432 |
+
|
| 433 |
+
batch_size, seqlen_q = q.shape[1], q.shape[0]
|
| 434 |
+
seqlen_k = k.shape[0]
|
| 435 |
+
q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
|
| 436 |
+
cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q.device)
|
| 437 |
+
if self.training:
|
| 438 |
+
# during training q,k,v always have same seqlen
|
| 439 |
+
assert seqlen_k == seqlen_q
|
| 440 |
+
is_causal = self.causal
|
| 441 |
+
cu_seqlens_k = cu_seqlens_q
|
| 442 |
+
dropout_p = self.dropout_p
|
| 443 |
+
else:
|
| 444 |
+
# turn off FA causal mask after first inference autoregressive iteration
|
| 445 |
+
# only on first autoregressive step q,k,v have same seqlen
|
| 446 |
+
is_causal = seqlen_q == seqlen_k
|
| 447 |
+
cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=q.device)
|
| 448 |
+
#cu_seqlens_q = [cu_seqlens_q[0], cu_seqlens_q[-1]]
|
| 449 |
+
#cu_seqlens_k = [cu_seqlens_k[0], cu_seqlens_k[-1]]
|
| 450 |
+
dropout_p = 0
|
| 451 |
+
|
| 452 |
+
output = flash_attn_unpadded_func(q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, dropout_p, softmax_scale=self.softmax_scale, causal=is_causal)
|
| 453 |
+
|
| 454 |
+
output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
|
| 455 |
+
return output
|
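The varlen FlashAttention call above describes each sequence by cumulative token offsets rather than padding. A small sketch of how those cu_seqlens tensors are laid out (illustrative only, not part of the uploaded file; batch and length are made up):

# cu_seqlens sketch (assumed batch_size=2, seqlen=4)
import torch

batch_size, seqlen = 2, 4
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32)
print(cu_seqlens)   # tensor([0, 4, 8], dtype=torch.int32): token ranges [0, 4) and [4, 8)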
| 456 |
+
|
| 457 |
+
class ParallelAttention_router(nn.Module):
|
| 458 |
+
def __init__(self, config):
|
| 459 |
+
super(ParallelAttention_router, self).__init__()
|
| 460 |
+
layer_number=0
|
| 461 |
+
self.layer_number = max(1, layer_number)
|
| 462 |
+
|
| 463 |
+
self.hidden_size = config.hidden_size
|
| 464 |
+
self.projection_size = config.moe_config['moe_num_experts']
|
| 465 |
+
|
| 466 |
+
self.num_attention_router_heads = config.moe_config['num_attention_router_heads']
|
| 467 |
+
self.hidden_size_per_attention_head = config.max_position_embeddings // self.num_attention_router_heads
|
| 468 |
+
self.query_key_value = nn.Linear(self.hidden_size, self.projection_size*3, bias=False)
|
| 469 |
+
|
| 470 |
+
def forward(self, hidden_states, attention_mask=None, enc_position_ids=None,
|
| 471 |
+
encoder_output=None, inference_params=None,
|
| 472 |
+
rotary_pos_emb=None):
|
| 473 |
+
is_first_step = False
|
| 474 |
+
before_hidden_states = None
|
| 475 |
+
|
| 476 |
+
#mixed_x_layer = torch.matmul(hidden_states, self.query_key_value)
|
| 477 |
+
mixed_x_layer = self.query_key_value(hidden_states)
|
| 478 |
+
(query_layer, key_layer, value_layer) = torch.split(mixed_x_layer, self.projection_size, -1)
|
| 479 |
+
b, s, z = query_layer.shape
|
| 480 |
+
|
| 481 |
+
# use fp32 router
|
| 482 |
+
query_layer = query_layer.float().view(b,s,z,1)
|
| 483 |
+
key_layer = key_layer.float().view(b,s,z,1)
|
| 484 |
+
value_layer = value_layer.float().view(b,s,z,1)
|
| 485 |
+
|
| 486 |
+
attn_weights = torch.matmul(query_layer, key_layer.transpose(2, 3))
|
| 487 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
| 488 |
+
attn_output = torch.matmul(attn_weights, value_layer)
|
| 489 |
+
router_output = attn_output.view(-1, z)
|
| 490 |
+
return router_output
|
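ParallelAttention_router turns each token into query/key/value vectors of length num_experts and flattens the attended output into per-token router logits. A shape sketch (illustrative only, not part of the uploaded file; sizes are made up):

# Attention-style router shape sketch (assumed b, s, num_experts)
import torch

b, s, z = 2, 3, 4                                      # batch, seq, num_experts
q, k, v = (torch.randn(b, s, z, 1) for _ in range(3))
attn = torch.softmax(q @ k.transpose(2, 3), dim=-1)    # (b, s, z, z)
router_logits = (attn @ v).view(-1, z)                 # (b * s, z)
print(router_logits.shape)                             # torch.Size([6, 4])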
| 491 |
+
|
| 492 |
+
class YuanExpertMLP(nn.Module):
|
| 493 |
+
def __init__(self, config):
|
| 494 |
+
super(YuanExpertMLP, self).__init__()
|
| 495 |
+
self.gated_linear_unit = config.moe_config['gated_linear_unit']
|
| 496 |
+
#self.ffn_hidden_size = config.moe_config['ffn_hidden_size']
|
| 497 |
+
self.ffn_hidden_size = config.ffn_hidden_size
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
if self.gated_linear_unit:
|
| 501 |
+
self.w1 = nn.Linear(config.hidden_size, self.ffn_hidden_size*2, bias=False)
|
| 502 |
+
|
| 503 |
+
else:
|
| 504 |
+
self.w1 = nn.Linear(config.hidden_size, self.ffn_hidden_size, bias=False)
|
| 505 |
+
|
| 506 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
| 507 |
+
self.w2 = nn.Linear(self.ffn_hidden_size, config.hidden_size, bias=False)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def forward(self, x):
|
| 511 |
+
x = self.w1(x)
|
| 512 |
+
if self.gated_linear_unit:
|
| 513 |
+
x = torch.chunk(x, 2, dim=-1)
|
| 514 |
+
x = self.act_fn(x[0]) * x[1]
|
| 515 |
+
else:
|
| 516 |
+
x = self.act_fn(x)
|
| 517 |
+
x = self.w2(x)
|
| 518 |
+
return x
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
class YuanMLP(nn.Module):
|
| 523 |
+
def __init__(
|
| 524 |
+
self,
|
| 525 |
+
hidden_size: int,
|
| 526 |
+
intermediate_size: int,
|
| 527 |
+
hidden_act: str
|
| 528 |
+
):
|
| 529 |
+
super().__init__()
|
| 530 |
+
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
|
| 531 |
+
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
|
| 532 |
+
self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
|
| 533 |
+
self.act_fn = ACT2FN[hidden_act]
|
| 534 |
+
|
| 535 |
+
def forward(self, x):
|
| 536 |
+
return self.down_proj(self.gate_proj(x) * self.act_fn(self.up_proj(x)))
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
class YuanAttention(nn.Module):
|
| 540 |
+
"""Localized Filtering-based Attention 'YUAN 2.0: A Large Language Model with Localized Filtering-based Attention' paper"""
|
| 541 |
+
|
| 542 |
+
def __init__(self, config: YuanConfig):
|
| 543 |
+
super().__init__()
|
| 544 |
+
self.config = config
|
| 545 |
+
self.hidden_size = config.hidden_size
|
| 546 |
+
self.num_heads = config.num_attention_heads
|
| 547 |
+
self.lf_conv2d_group = config.lf_conv2d_group
|
| 548 |
+
self.lf_conv2d_num_pad = config.lf_conv2d_num_pad
|
| 549 |
+
|
| 550 |
+
try:
|
| 551 |
+
self.attention_projection_size = config.attention_projection_size
|
| 552 |
+
except AttributeError:
|
| 553 |
+
self.attention_projection_size = None
|
| 554 |
+
|
| 555 |
+
if self.attention_projection_size is None:
|
| 556 |
+
self.head_dim = self.hidden_size // self.num_heads
|
| 557 |
+
else:
|
| 558 |
+
self.head_dim = self.attention_projection_size // self.num_heads
|
| 559 |
+
|
| 560 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 561 |
+
self.causal_mask = config.causal_mask
|
| 562 |
+
self.attn_mask_type = config.attn_mask_type
|
| 563 |
+
self.softmax_scale = 1.0 / math.sqrt(self.head_dim)
|
| 564 |
+
self.use_flash_attention = config.use_flash_attention
|
| 565 |
+
try:
|
| 566 |
+
self.use_shareqk = config.use_shareqk
|
| 567 |
+
except Exception as e:
|
| 568 |
+
self.use_shareqk=False
|
| 569 |
+
self.dropout = 0.0
|
| 570 |
+
self.attention_projection_size = config.attention_projection_size
|
| 571 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
|
| 572 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
|
| 573 |
+
|
| 574 |
+
if self.use_shareqk:
|
| 575 |
+
self.qk_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
|
| 576 |
+
self.qk_weight = nn.Parameter(torch.Tensor(2, self.hidden_size))
|
| 577 |
+
self.qk_bias = nn.Parameter(torch.Tensor(2, self.hidden_size))
|
| 578 |
+
else:
|
| 579 |
+
self.lf_gate = LocalizedFiltering(self.hidden_size, self.lf_conv2d_group, self.lf_conv2d_num_pad)
|
| 580 |
+
self.get_query_key = nn.Linear(self.hidden_size, 2 * self.attention_projection_size, bias=False)
|
| 581 |
+
self.core_attention = FlashSelfAttention(causal=True, attention_dropout=config.attn_dropout, softmax_scale=self.softmax_scale)
|
| 582 |
+
#self.core_attention_flash = DotProductAttention(num_attention_heads=self.num_heads,
|
| 583 |
+
# kv_channels=self.head_dim)
|
| 584 |
+
|
| 585 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
| 586 |
+
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
| 587 |
+
|
| 588 |
+
def forward(
|
| 589 |
+
self,
|
| 590 |
+
hidden_states: torch.Tensor,
|
| 591 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 592 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 593 |
+
position_ids_k: Optional[torch.LongTensor] = None,
|
| 594 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 595 |
+
rotary_pos_emb: Optional[Tuple[torch.Tensor]] = None,
|
| 596 |
+
output_attentions: bool = False,
|
| 597 |
+
use_cache: bool = False,
|
| 598 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 599 |
+
q_len, bsz, _ = hidden_states.size()
|
| 600 |
+
hidden_states = hidden_states#.to('cuda:1')
|
| 601 |
+
is_first_step = False
|
| 602 |
+
if use_cache:
|
| 603 |
+
if past_key_value is None:
|
| 604 |
+
before_hidden_states = None
|
| 605 |
+
is_first_step = True
|
| 606 |
+
if q_len > 1:
|
| 607 |
+
inference_hidden_states_memory = hidden_states[-2:, :, :]
|
| 608 |
+
else:
|
| 609 |
+
inference_hidden_states_memory = torch.cat((torch.zeros_like(hidden_states), hidden_states), dim=0)
|
| 610 |
+
else:
|
| 611 |
+
before_hidden_states = past_key_value[2]
|
| 612 |
+
inference_hidden_states_memory = torch.cat((before_hidden_states[-1:, :, :], hidden_states), dim=0)
|
| 613 |
+
value_states = self.v_proj(hidden_states).view(q_len, bsz, self.num_heads, self.head_dim)
|
| 614 |
+
if self.use_shareqk:
|
| 615 |
+
qk_states = self.qk_proj(hidden_states).view(q_len, bsz, self.num_heads*self.head_dim)
|
| 616 |
+
query_key = qk_states.unsqueeze(2) * self.qk_weight + self.qk_bias
|
| 617 |
+
query_states, key_states = torch.unbind(query_key, dim=2)
|
| 618 |
+
|
| 619 |
+
query_states = query_states.view(q_len, bsz, self.num_heads, self.head_dim).transpose(1, 2)
|
| 620 |
+
key_states = key_states.view(q_len, bsz, self.num_heads, self.head_dim).transpose(1, 2)
|
| 621 |
+
else:
|
| 622 |
+
hidden_states = self.lf_gate(hidden_states, before_hidden_states)
|
| 623 |
+
mixed_qk_layer = self.get_query_key(hidden_states)
|
| 624 |
+
#mixed_qk_layer = torch.matmul(hidden_states, qk_tensor)
|
| 625 |
+
new_tensor_shape = mixed_qk_layer.size()[:-1] + (self.num_heads, 2 * self.head_dim)
|
| 626 |
+
mixed_qk_layer = mixed_qk_layer.view(*new_tensor_shape)
|
| 627 |
+
(query_states, key_states) = torch.split(mixed_qk_layer, self.head_dim, dim=-1)
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
kv_seq_len = key_states.shape[1]
|
| 631 |
+
if past_key_value is not None:
|
| 632 |
+
kv_seq_len += past_key_value[0].shape[1]
|
| 633 |
+
|
| 634 |
+
# duplicate the pos_emb for self attention
|
| 635 |
+
if rotary_pos_emb is not None:
|
| 636 |
+
if position_ids.shape[1] == 1:
|
| 637 |
+
q_seq_start = position_ids[0,-1]
|
| 638 |
+
#seq_start = past_key_value[0].shape[0]
|
| 639 |
+
q_seq_end = q_seq_start + 1
|
| 640 |
+
k_seq_end = q_seq_end
|
| 641 |
+
else:
|
| 642 |
+
q_seq_start = 0
|
| 643 |
+
q_seq_end = q_seq_start+key_states.shape[0]
|
| 644 |
+
k_seq_end = q_seq_end
|
| 645 |
+
|
| 646 |
+
rotary_pos_shape = rotary_pos_emb.shape
|
| 647 |
+
if isinstance(rotary_pos_emb, tuple):
|
| 648 |
+
rotary_pos_emb = rotary_pos_emb
|
| 649 |
+
else:
|
| 650 |
+
rotary_pos_emb = ((rotary_pos_emb,) * 2)
|
| 651 |
+
q_pos_emb, k_pos_emb = rotary_pos_emb
|
| 652 |
+
if past_key_value is not None:
|
| 653 |
+
# reuse k, v, self_attention
|
| 654 |
+
key_states = torch.cat([past_key_value[0], key_states], dim=0)
|
| 655 |
+
value_states = torch.cat([past_key_value[1], value_states], dim=0)
|
| 656 |
+
past_key_value = (key_states, value_states, inference_hidden_states_memory) if use_cache else None
|
| 657 |
+
#query_states = apply_rotary_pos_emb(query_states.permute(1, 0, 2, 3), q_pos_emb, position_ids)
|
| 658 |
+
#key_states = apply_rotary_pos_emb(key_states.permute(1, 0, 2, 3), k_pos_emb, position_ids)
|
| 659 |
+
query_states = apply_rotary_pos_emb(query_states, q_pos_emb, position_ids)
|
| 660 |
+
key_states = apply_rotary_pos_emb(key_states, k_pos_emb, position_ids_k)
|
| 661 |
+
|
| 662 |
+
attn_weights = None
|
| 663 |
+
#query_states = query_states.transpose(0,1)
|
| 664 |
+
#key_states = key_states.transpose(0,1)
|
| 665 |
+
#value_states = value_states
|
| 666 |
+
attn_output = self.core_attention(query_states, key_states, value_states)
|
| 667 |
+
#attn_output = self.core_attention(query_states, key_states, value_states, attention_mask)
|
| 668 |
+
q_len, bsz, _, _ = attn_output.shape
|
| 669 |
+
attn_output = attn_output.reshape(q_len, bsz, -1)
|
| 670 |
+
|
| 671 |
+
attn_output = self.o_proj(attn_output)
|
| 672 |
+
|
| 673 |
+
return attn_output, attn_weights, past_key_value
|
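In the non-shared-QK path above, a single fused projection produces query and key together, which are then reshaped per head and split along the last dimension. A minimal sketch (illustrative only, not part of the uploaded file; sizes are made up):

# Fused QK split sketch (assumed sizes)
import torch

q_len, bsz, num_heads, head_dim = 4, 2, 3, 8
mixed_qk = torch.randn(q_len, bsz, num_heads * 2 * head_dim)
mixed_qk = mixed_qk.view(q_len, bsz, num_heads, 2 * head_dim)
query_states, key_states = torch.split(mixed_qk, head_dim, dim=-1)
print(query_states.shape, key_states.shape)   # both torch.Size([4, 2, 3, 8])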
| 674 |
+
|
| 675 |
+
class MoEDroplessTokenDispatcher:
|
| 676 |
+
def __init__(self, num_experts: int, config: YuanConfig) -> None:
|
| 677 |
+
self.num_experts = num_experts
|
| 678 |
+
assert self.num_experts > 0, "Expected at least one expert"
|
| 679 |
+
self.router_topk = config.moe_config['moe_top_k']
|
| 680 |
+
|
| 681 |
+
def token_permutation(
|
| 682 |
+
self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor
|
| 683 |
+
):
|
| 684 |
+
self.hidden_shape = hidden_states.shape
|
| 685 |
+
hidden_states = hidden_states.view(-1, self.hidden_shape[-1])
|
| 686 |
+
|
| 687 |
+
if self.router_topk > 1:
|
| 688 |
+
global_local_map = torch.ones_like(max_ind).bool()
|
| 689 |
+
local_indices = max_ind.masked_select(global_local_map)
|
| 690 |
+
local_probs = max_prob.masked_select(global_local_map)
|
| 691 |
+
global_local_map = global_local_map.nonzero()[:, 0]
|
| 692 |
+
global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1])
|
| 693 |
+
local_hidden_states = torch.gather(hidden_states, 0, global_local_map)
|
| 694 |
+
|
| 695 |
+
indices = torch.argsort(local_indices, dim=0)
|
| 696 |
+
tokens_per_expert = torch.histc(
|
| 697 |
+
local_indices,
|
| 698 |
+
bins=self.num_experts,
|
| 699 |
+
min=0,
|
| 700 |
+
max=self.num_experts - 1,
|
| 701 |
+
)
|
| 702 |
+
tokens_per_expert = tokens_per_expert.cpu().to(torch.long)
|
| 703 |
+
|
| 704 |
+
indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1])
|
| 705 |
+
permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices)
|
| 706 |
+
return (permuted_local_hidden_states, tokens_per_expert, local_probs, indices, global_local_map)
|
| 707 |
+
|
| 708 |
+
def token_unpermutation(
|
| 709 |
+
self,
|
| 710 |
+
hidden_states: torch.Tensor,
|
| 711 |
+
scores: torch.Tensor,
|
| 712 |
+
indices: torch.Tensor,
|
| 713 |
+
global_local_map: torch.Tensor = None,
|
| 714 |
+
):
|
| 715 |
+
scores = scores.to(dtype=hidden_states.dtype)
|
| 716 |
+
unpermuted_local_hidden = torch.zeros_like(hidden_states)
|
| 717 |
+
assert indices.shape == hidden_states.shape, f'{indices.shape}, {hidden_states.shape}'
|
| 718 |
+
unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states)
|
| 719 |
+
|
| 720 |
+
if self.router_topk > 1:
|
| 721 |
+
unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1)
|
| 722 |
+
unpermuted_local_bias = None
|
| 723 |
+
output_total = unpermuted_local_hidden
|
| 724 |
+
output_bias_total = unpermuted_local_bias
|
| 725 |
+
|
| 726 |
+
if self.router_topk > 1:
|
| 727 |
+
global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1]
|
| 728 |
+
global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]]
|
| 729 |
+
unpermuted_global_hidden = torch.zeros(
|
| 730 |
+
global_hidden_shape,
|
| 731 |
+
dtype=hidden_states.dtype,
|
| 732 |
+
device=hidden_states.device,
|
| 733 |
+
)
|
| 734 |
+
output_total = unpermuted_global_hidden.scatter_add(
|
| 735 |
+
0, global_local_map, unpermuted_local_hidden
|
| 736 |
+
)
|
| 737 |
+
|
| 738 |
+
output_total = output_total.view(self.hidden_shape)
|
| 739 |
+
|
| 740 |
+
return output_total
|
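The dispatcher above groups tokens by the expert they were routed to and counts how many land on each expert, so the experts can process contiguous slices. A small sketch of that bookkeeping (illustrative only, not part of the uploaded file; the routing assignment is made up):

# Token-dispatch bookkeeping sketch (assumed 4 tokens, 4 experts)
import torch

num_experts = 4
expert_ids = torch.tensor([2, 0, 2, 1])      # routed expert for each token
order = torch.argsort(expert_ids)            # token order after grouping by expert
tokens_per_expert = torch.histc(expert_ids.float(), bins=num_experts,
                                min=0, max=num_experts - 1).to(torch.long)
print(order)               # tensor([1, 3, 0, 2])
print(tokens_per_expert)   # tensor([1, 1, 2, 0])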
| 741 |
+
|
| 742 |
+
class GroupedMLP(nn.Module):
|
| 743 |
+
"""An efficient implementation of the Experts layer using CUTLASS GroupedGEMM.
|
| 744 |
+
|
| 745 |
+
This class is designed to execute multiple experts in parallel, thereby maximizing computational efficiency.
|
| 746 |
+
"""
|
| 747 |
+
|
| 748 |
+
def __init__(self, num_experts: int, config: YuanConfig):
|
| 749 |
+
super().__init__()
|
| 750 |
+
self.num_experts = num_experts
|
| 751 |
+
self.config = config
|
| 752 |
+
|
| 753 |
+
def glu(x):
|
| 754 |
+
x = torch.chunk(x, 2, dim=-1)
|
| 755 |
+
return torch.nn.functional.silu(x[0]) * x[1]
|
| 756 |
+
|
| 757 |
+
self.activation_func = glu
|
| 758 |
+
#self.ffn_hidden_size = config.moe_config['ffn_hidden_size']
|
| 759 |
+
self.ffn_hidden_size = config.ffn_hidden_size
|
| 760 |
+
fc1_output_size_per_partition = self.ffn_hidden_size * 2
|
| 761 |
+
fc2_input_size = self.ffn_hidden_size
|
| 762 |
+
|
| 763 |
+
self.w1 = nn.ModuleList([nn.Linear(self.config.hidden_size, self.ffn_hidden_size * 2, bias=False) for _ in range(num_experts)])
|
| 764 |
+
self.w2 = nn.ModuleList([nn.Linear(self.ffn_hidden_size, self.config.hidden_size, bias=False) for _ in range(num_experts)])
|
| 765 |
+
def forward(self, permuted_hidden_states, tokens_per_expert):
|
| 766 |
+
torch.cuda.set_device(permuted_hidden_states.device)
|
| 767 |
+
permuted_hidden_states = permuted_hidden_states#.to('cuda:0')
|
| 768 |
+
#fc1_output = gg.ops.gmm(permuted_hidden_states, self.weight1, tokens_per_expert.cpu(), trans_b=False)
|
| 769 |
+
|
| 770 |
+
#intermediate_parallel = self.activation_func(fc1_output)
|
| 771 |
+
#fc2_output = gg.ops.gmm(intermediate_parallel, self.weight2, tokens_per_expert.cpu(), trans_b=False)
|
| 772 |
+
|
| 773 |
+
fc2_outputs = []
|
| 774 |
+
start_idx = 0
|
| 775 |
+
for i in range(self.num_experts):
|
| 776 |
+
if tokens_per_expert[i] == 0:
|
| 777 |
+
continue
|
| 778 |
+
end_idx = start_idx + tokens_per_expert[i]
|
| 779 |
+
#fc1_output = torch.matmul(permuted_hidden_states[start_idx:end_idx], self.w1[i])
|
| 780 |
+
# Use custom attributes for each expert's Linear layers
|
| 781 |
+
|
| 782 |
+
fc1_output = self.w1[i](permuted_hidden_states[start_idx:end_idx])
|
| 783 |
+
#print("shape1:", self.w1[i].shape, "shape2:", permuted_hidden_states[start_idx:end_idx].shape)
|
| 784 |
+
intermediate_parallel = self.activation_func(fc1_output)
|
| 785 |
+
#fc2_output = torch.matmul(intermediate_parallel, self.w2[i])
|
| 786 |
+
fc2_output = self.w2[i](intermediate_parallel)
|
| 787 |
+
fc2_outputs.append(fc2_output)
|
| 788 |
+
start_idx = end_idx
|
| 789 |
+
fc2_output = torch.cat(fc2_outputs, dim=0)
|
| 790 |
+
return fc2_output#.to('cuda:1')
|
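Note that in this file the GroupedGEMM calls are commented out and GroupedMLP loops over experts with ordinary Linear layers; each expert applies the glu activation defined above, where the fc1 output is split in two and one half gates the other via SiLU. A minimal sketch (illustrative only, not part of the uploaded file; sizes are made up):

# Gated (GLU) activation sketch (assumed ffn_hidden_size=16)
import torch
import torch.nn.functional as F

fc1_output = torch.randn(5, 2 * 16)          # (tokens routed to one expert, 2 * ffn_hidden_size)
x0, x1 = torch.chunk(fc1_output, 2, dim=-1)
gated = F.silu(x0) * x1                      # (tokens, ffn_hidden_size)
print(gated.shape)                           # torch.Size([5, 16])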
| 791 |
+
|
| 792 |
+
class YuanMoeLayer(nn.Module):
|
| 793 |
+
def __init__(self, config:YuanConfig):
|
| 794 |
+
super().__init__()
|
| 795 |
+
self.config = config
|
| 796 |
+
self.num_experts = config.moe_config['moe_num_experts']
|
| 797 |
+
self.top_k = config.moe_config['moe_top_k']
|
| 798 |
+
self.norm_topk_prob = config.moe_config['norm_topk_prob']
|
| 799 |
+
self.hidden_size = config.hidden_size
|
| 800 |
+
|
| 801 |
+
expert_indices_offset = (0)
|
| 802 |
+
|
| 803 |
+
#self.gate = ParallelAttention_router(config)
|
| 804 |
+
self.router = ParallelAttention_router(config)
|
| 805 |
+
self.token_dispatcher = MoEDroplessTokenDispatcher(self.num_experts, config=self.config)
|
| 806 |
+
self.experts = GroupedMLP(self.num_experts, self.config)
|
| 807 |
+
|
| 808 |
+
def routing(self, logits: torch.Tensor) -> torch.Tensor:
|
| 809 |
+
top_logits, indices = torch.topk(logits, k=self.top_k, dim=1)
|
| 810 |
+
scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits)
|
| 811 |
+
return scores, indices
|
| 812 |
+
|
| 813 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 814 |
+
batch_size, sequence_length, hidden_dim = hidden_states.shape
|
| 815 |
+
#logits = self.gate(hidden_states)
|
| 816 |
+
logits = self.router(hidden_states)
|
| 817 |
+
scores, indices = self.routing(logits)
|
| 818 |
+
scores = scores.to(hidden_states.dtype)
|
| 819 |
+
(dispatched_input, tokens_per_expert, scores, indices, global_local_map, ) = self.token_dispatcher.token_permutation(hidden_states, scores, indices)
|
| 820 |
+
expert_output = self.experts(dispatched_input, tokens_per_expert)
|
| 821 |
+
output = self.token_dispatcher.token_unpermutation(expert_output, scores, indices, global_local_map)
|
| 822 |
+
return output
|
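YuanMoeLayer.routing keeps only the k largest router logits per token and normalizes them with a softmax over that restricted set. A standalone sketch (illustrative only, not part of the uploaded file; the logits are made up):

# Top-k routing sketch (assumed one token, four experts, k=2)
import torch

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
top_logits, indices = torch.topk(logits, k=2, dim=1)              # experts 0 and 2
scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32)   # ~0.73 and ~0.27
print(indices, scores)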
| 823 |
+
|
| 824 |
+
class YuanDecoderLayer(nn.Module):
|
| 825 |
+
def __init__(self, config: YuanConfig, num_layer):
|
| 826 |
+
super().__init__()
|
| 827 |
+
self.hidden_size = config.hidden_size
|
| 828 |
+
self.self_attn = YuanAttention(config=config)
|
| 829 |
+
self.num_layer = num_layer
|
| 830 |
+
|
| 831 |
+
if config.moe_config['moe_num_experts'] > 0:
|
| 832 |
+
self.mlp = YuanMoeLayer(config)
|
| 833 |
+
else:
|
| 834 |
+
self.mlp = YuanMLP(
|
| 835 |
+
hidden_size=self.hidden_size,
|
| 836 |
+
intermediate_size=config.intermediate_size,
|
| 837 |
+
hidden_act=config.hidden_act,
|
| 838 |
+
)
|
| 839 |
+
|
| 840 |
+
|
| 841 |
+
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 842 |
+
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 843 |
+
|
| 844 |
+
def forward(
|
| 845 |
+
self,
|
| 846 |
+
hidden_states: torch.Tensor,
|
| 847 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 848 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 849 |
+
position_ids_k: Optional[torch.LongTensor] = None,
|
| 850 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 851 |
+
rotary_pos_emb: Optional[Tuple[torch.Tensor]] = None,
|
| 852 |
+
output_attentions: Optional[bool] = False,
|
| 853 |
+
use_cache: Optional[bool] = False,
|
| 854 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 855 |
+
"""
|
| 856 |
+
Args:
|
| 857 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 858 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
| 859 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
| 860 |
+
output_attentions (`bool`, *optional*):
|
| 861 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 862 |
+
returned tensors for more detail.
|
| 863 |
+
use_cache (`bool`, *optional*):
|
| 864 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 865 |
+
(see `past_key_values`).
|
| 866 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
| 867 |
+
"""
|
| 868 |
+
residual = hidden_states#.to('cuda:1')
|
| 869 |
+
torch.cuda.set_device(hidden_states.device)
|
| 870 |
+
hidden_states = self.input_layernorm(hidden_states) #.to('cuda:0')).to('cuda:1')
|
| 871 |
+
# Self Attention
|
| 872 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
| 873 |
+
hidden_states=hidden_states,
|
| 874 |
+
attention_mask=attention_mask,
|
| 875 |
+
position_ids=position_ids,
|
| 876 |
+
position_ids_k=position_ids_k,
|
| 877 |
+
past_key_value=past_key_value,
|
| 878 |
+
rotary_pos_emb=rotary_pos_emb,
|
| 879 |
+
output_attentions=output_attentions,
|
| 880 |
+
use_cache=use_cache,
|
| 881 |
+
)
|
| 882 |
+
hidden_states = residual + hidden_states.permute(1, 0, 2)
|
| 883 |
+
# Fully Connected
|
| 884 |
+
residual = hidden_states#.to('cuda:1')
|
| 885 |
+
torch.cuda.set_device(hidden_states.device)
|
| 886 |
+
hidden_states = self.post_attention_layernorm(hidden_states) #.to('cuda:0')).to('cuda:1')
|
| 887 |
+
hidden_states = self.mlp(hidden_states)# .to('cuda:1')
|
| 888 |
+
hidden_states = residual + hidden_states
|
| 889 |
+
outputs = (hidden_states,)
|
| 890 |
+
|
| 891 |
+
if output_attentions:
|
| 892 |
+
outputs += (self_attn_weights,)
|
| 893 |
+
|
| 894 |
+
if use_cache:
|
| 895 |
+
outputs += (present_key_value,)
|
| 896 |
+
|
| 897 |
+
return outputs
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
YUAN_START_DOCSTRING = r"""
|
| 901 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 902 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 903 |
+
etc.)
|
| 904 |
+
|
| 905 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 906 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 907 |
+
and behavior.
|
| 908 |
+
|
| 909 |
+
Parameters:
|
| 910 |
+
config ([`YuanConfig`]):
|
| 911 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| 912 |
+
load the weights associated with the model, only the configuration. Check out the
|
| 913 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 914 |
+
"""
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
@add_start_docstrings(
|
| 918 |
+
"The bare Yuan Model outputting raw hidden-states without any specific head on top.",
|
| 919 |
+
YUAN_START_DOCSTRING,
|
| 920 |
+
)
|
| 921 |
+
class YuanPreTrainedModel(PreTrainedModel):
|
| 922 |
+
config_class = YuanConfig
|
| 923 |
+
base_model_prefix = "model"
|
| 924 |
+
supports_gradient_checkpointing = True
|
| 925 |
+
_no_split_modules = ["YuanDecoderLayer"]
|
| 926 |
+
_skip_keys_device_placement = "past_key_values"
|
| 927 |
+
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
|
| 928 |
+
|
| 929 |
+
def _init_weights(self, module):
|
| 930 |
+
std = self.config.initializer_range
|
| 931 |
+
if isinstance(module, nn.Linear):
|
| 932 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 933 |
+
if module.bias is not None:
|
| 934 |
+
module.bias.data.zero_()
|
| 935 |
+
elif isinstance(module, nn.Embedding):
|
| 936 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 937 |
+
if module.padding_idx is not None:
|
| 938 |
+
module.weight.data[module.padding_idx].zero_()
|
| 939 |
+
|
| 940 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
| 941 |
+
if isinstance(module, YuanModel):
|
| 942 |
+
module.gradient_checkpointing = value
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
YUAN_INPUTS_DOCSTRING = r"""
|
| 946 |
+
Args:
|
| 947 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 948 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| 949 |
+
it.
|
| 950 |
+
|
| 951 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 952 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 953 |
+
|
| 954 |
+
[What are input IDs?](../glossary#input-ids)
|
| 955 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 956 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 957 |
+
|
| 958 |
+
- 1 for tokens that are **not masked**,
|
| 959 |
+
- 0 for tokens that are **masked**.
|
| 960 |
+
|
| 961 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 962 |
+
|
| 963 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 964 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 965 |
+
|
| 966 |
+
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
| 967 |
+
`past_key_values`).
|
| 968 |
+
|
| 969 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
| 970 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
| 971 |
+
information on the default strategy.
|
| 972 |
+
|
| 973 |
+
- 1 indicates the head is **not masked**,
|
| 974 |
+
- 0 indicates the head is **masked**.
|
| 975 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 976 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 977 |
+
config.n_positions - 1]`.
|
| 978 |
+
|
| 979 |
+
[What are position IDs?](../glossary#position-ids)
|
| 980 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 981 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
|
| 982 |
+
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
|
| 983 |
+
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
|
| 984 |
+
|
| 985 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 986 |
+
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
| 987 |
+
|
| 988 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
|
| 989 |
+
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
|
| 990 |
+
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
| 991 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 992 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 993 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 994 |
+
model's internal embedding lookup matrix.
|
| 995 |
+
use_cache (`bool`, *optional*):
|
| 996 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 997 |
+
`past_key_values`).
|
| 998 |
+
output_attentions (`bool`, *optional*):
|
| 999 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 1000 |
+
tensors for more detail.
|
| 1001 |
+
output_hidden_states (`bool`, *optional*):
|
| 1002 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1003 |
+
more detail.
|
| 1004 |
+
return_dict (`bool`, *optional*):
|
| 1005 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 1006 |
+
"""
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
@add_start_docstrings(
|
| 1010 |
+
"The bare Yuan Model outputting raw hidden-states without any specific head on top.",
|
| 1011 |
+
YUAN_START_DOCSTRING,
|
| 1012 |
+
)
|
| 1013 |
+
class YuanModel(YuanPreTrainedModel):
|
| 1014 |
+
"""
|
| 1015 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`YuanDecoderLayer`]
|
| 1016 |
+
|
| 1017 |
+
Args:
|
| 1018 |
+
config: YuanConfig
|
| 1019 |
+
"""
|
| 1020 |
+
|
| 1021 |
+
def __init__(self, config: YuanConfig):
|
| 1022 |
+
super().__init__(config)
|
| 1023 |
+
self.padding_idx = config.pad_token_id
|
| 1024 |
+
self.vocab_size = config.vocab_size
|
| 1025 |
+
|
| 1026 |
+
#TODO: control it by config
|
| 1027 |
+
self.eod_token = config.eod_token
|
| 1028 |
+
self.reset_attention_mask = config.reset_attention_mask
|
| 1029 |
+
self.reset_position_ids = config.reset_position_ids
|
| 1030 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 1031 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 1032 |
+
self.layers = nn.ModuleList([YuanDecoderLayer(config, i) for i in range(config.num_hidden_layers)])
|
| 1033 |
+
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 1034 |
+
self.gradient_checkpointing = False
|
| 1035 |
+
# Initialize weights and apply final processing
|
| 1036 |
+
self.post_init()
|
| 1037 |
+
|
| 1038 |
+
self.seq_length = config.max_position_embeddings
|
| 1039 |
+
rotary_dim = config.hidden_size // config.num_attention_heads
|
| 1040 |
+
if config.rotary_percent < 1.0:
|
| 1041 |
+
rotary_dim = int(rotary_dim * config.rotary_percent)
|
| 1042 |
+
self.rotary_pos_emb = YuanRotaryEmbedding(rotary_dim, base=config.rotary_base, dtype=config.torch_dtype)
|
| 1043 |
+
|
| 1044 |
+
|
| 1045 |
+
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
|
| 1046 |
+
return self.embed_tokens(input_ids)
|
| 1047 |
+
|
| 1048 |
+
def set_input_embeddings(self, value):
|
| 1049 |
+
self.embed_tokens = value
|
| 1050 |
+
|
| 1051 |
+
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
|
| 1052 |
+
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
|
| 1053 |
+
# create causal mask
|
| 1054 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
| 1055 |
+
combined_attention_mask = None
|
| 1056 |
+
if input_shape[-1] > 1:
|
| 1057 |
+
combined_attention_mask = _make_causal_mask(
|
| 1058 |
+
input_shape,
|
| 1059 |
+
inputs_embeds.dtype,
|
| 1060 |
+
device=inputs_embeds.device,
|
| 1061 |
+
past_key_values_length=past_key_values_length,
|
| 1062 |
+
)
|
| 1063 |
+
|
| 1064 |
+
if attention_mask is not None:
|
| 1065 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
| 1066 |
+
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
|
| 1067 |
+
inputs_embeds.device
|
| 1068 |
+
)
|
| 1069 |
+
combined_attention_mask = (
|
| 1070 |
+
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
|
| 1071 |
+
)
|
| 1072 |
+
|
| 1073 |
+
return combined_attention_mask
|
| 1074 |
+
|
| 1075 |
+
def _prepare_decoder_attention_mask_training(self, input_id, inputs_embeds, eod_token, reset_mask_flag ,reset_attention_mask=True, reset_position_ids=True):
|
| 1076 |
+
|
| 1077 |
+
micro_batch_size, seq_length = input_id.size()
|
| 1078 |
+
|
| 1079 |
+
attention_mask = torch.tril(torch.ones(
|
| 1080 |
+
(micro_batch_size, seq_length, seq_length), device=inputs_embeds.device)).view(
|
| 1081 |
+
micro_batch_size, 1, seq_length, seq_length)
|
| 1082 |
+
|
| 1083 |
+
position_ids = torch.arange(seq_length, dtype=torch.long,
|
| 1084 |
+
device=inputs_embeds.device)
|
| 1085 |
+
position_ids = position_ids.unsqueeze(0).expand_as(input_id)
|
| 1086 |
+
|
| 1087 |
+
if reset_position_ids:
|
| 1088 |
+
position_ids = position_ids.clone()
|
| 1089 |
+
|
| 1090 |
+
if reset_position_ids or reset_attention_mask:
|
| 1091 |
+
# Loop through the batches:
|
| 1092 |
+
for b in range(micro_batch_size):
|
| 1093 |
+
|
| 1094 |
+
# Find indices of the EOD tokens.
eod_index = position_ids[b, input_id[b] == eod_token]

# Detach indices from positions if going to modify positions.
if reset_position_ids:
eod_index = eod_index.clone()
# Loop through EOD indices:
prev_index = 0
for j in range(eod_index.size()[0]):
|
| 1103 |
+
i = eod_index[j]
|
| 1104 |
+
# Mask attention loss.
|
| 1105 |
+
if reset_attention_mask:
|
| 1106 |
+
attention_mask[b, 0, (i + 1):, :(i + 1)] = 0
|
| 1107 |
+
# Reset positions.
|
| 1108 |
+
if reset_position_ids:
|
| 1109 |
+
position_ids[b, (i + 1):] -= (i + 1 - prev_index)
|
| 1110 |
+
prev_index = i + 1
|
| 1111 |
+
|
| 1112 |
+
inverted_mask = 1 - attention_mask
|
| 1113 |
+
output_attn_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min)
|
| 1114 |
+
if reset_mask_flag:
|
| 1115 |
+
output_attn_mask = output_attn_mask[:,:,-1:,:]
|
| 1116 |
+
return output_attn_mask, position_ids
|
| 1117 |
+
|
| 1118 |
+
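The loop above gives each EOD-delimited document in a packed sequence its own attention block and restarts its position ids. A minimal sketch of the position-id reset, assuming a hypothetical `eod_token` id of 2 (the real id comes from `config.eod_token`):

```python
import torch

def example_reset_position_ids():
    # two packed documents, each terminated by the (assumed) EOD token id 2
    input_id = torch.tensor([[5, 6, 2, 7, 8, 9, 2, 4]])
    position_ids = torch.arange(input_id.size(1)).unsqueeze(0).clone()
    prev_index = 0
    for i in (input_id[0] == 2).nonzero().flatten().tolist():
        # every token after an EOD restarts its position count at 0
        position_ids[0, i + 1:] -= i + 1 - prev_index
        prev_index = i + 1
    return position_ids  # tensor([[0, 1, 2, 0, 1, 2, 3, 0]])
```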
@add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
|
| 1119 |
+
def forward(
|
| 1120 |
+
self,
|
| 1121 |
+
input_ids: torch.LongTensor = None,
|
| 1122 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1123 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1124 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1125 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1126 |
+
use_cache: Optional[bool] = None,
|
| 1127 |
+
output_attentions: Optional[bool] = None,
|
| 1128 |
+
output_hidden_states: Optional[bool] = None,
|
| 1129 |
+
output_router_logits: Optional[bool] = None,
|
| 1130 |
+
return_dict: Optional[bool] = None,
|
| 1131 |
+
) -> Union[Tuple, BaseModelOutputWithPast, torch.Tensor]:
|
| 1132 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1133 |
+
output_router_logits = (
|
| 1134 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
| 1135 |
+
)
|
| 1136 |
+
output_hidden_states = (
|
| 1137 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1138 |
+
)
|
| 1139 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 1140 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1141 |
+
input_ids1 = copy.deepcopy(input_ids)
|
| 1142 |
+
reset_mask_flag = False
|
| 1143 |
+
if past_key_values:
|
| 1144 |
+
input_ids = input_ids
|
| 1145 |
+
input_ids = input_ids[:,-1:]
|
| 1146 |
+
if use_cache:
|
| 1147 |
+
reset_mask_flag = True
|
| 1148 |
+
# retrieve input_ids and inputs_embeds
|
| 1149 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 1150 |
+
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
|
| 1151 |
+
elif input_ids is not None:
|
| 1152 |
+
input_ids = input_ids
|
| 1153 |
+
batch_size, seq_length = input_ids.shape
|
| 1154 |
+
elif inputs_embeds is not None:
|
| 1155 |
+
inputs_embeds = inputs_embeds.transpose(0,1)
|
| 1156 |
+
batch_size, seq_length, _ = inputs_embeds.shape
|
| 1157 |
+
else:
|
| 1158 |
+
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
|
| 1159 |
+
|
| 1160 |
+
seq_length_with_past = seq_length
|
| 1161 |
+
past_key_values_length = 0
|
| 1162 |
+
if past_key_values is not None:
|
| 1163 |
+
#past_key_values_length = past_key_values[0][0].shape[2]
|
| 1164 |
+
#modify
|
| 1165 |
+
print('0000')
|
| 1166 |
+
past_key_values_length = past_key_values[0][0].shape[0]
|
| 1167 |
+
seq_length_with_past = seq_length_with_past + past_key_values_length
|
| 1168 |
+
else:
|
| 1169 |
+
print('1111')
|
| 1170 |
+
|
| 1171 |
+
# modify to reset position ids
|
| 1172 |
+
if past_key_values is not None:
|
| 1173 |
+
pos_start = position_ids[:,-1]+1
|
| 1174 |
+
pos_end = pos_start+past_key_values[0][0].shape[0]-position_ids.shape[1]+1
|
| 1175 |
+
position_ids_k = torch.arange(pos_start.item(), pos_end.item()).to(position_ids.device)
|
| 1176 |
+
position_ids_k = position_ids_k.unsqueeze(0)
|
| 1177 |
+
position_ids_k = torch.cat((position_ids, position_ids_k), dim=1)
|
| 1178 |
+
position_ids = position_ids[:,-1]+past_key_values[0][0].shape[0]-position_ids.shape[1]+1
|
| 1179 |
+
position_ids = position_ids.unsqueeze(0)
|
| 1180 |
+
else:
|
| 1181 |
+
position_ids_k = position_ids
|
| 1182 |
+
|
| 1183 |
+
if position_ids is None:
|
| 1184 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| 1185 |
+
position_ids = torch.arange(
|
| 1186 |
+
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
|
| 1187 |
+
)
|
| 1188 |
+
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
|
| 1189 |
+
else:
|
| 1190 |
+
pass
|
| 1191 |
+
|
| 1192 |
+
if inputs_embeds is None:
|
| 1193 |
+
inputs_embeds = self.embed_tokens(input_ids).transpose(0,1)
|
| 1194 |
+
|
| 1195 |
+
if self.training or self.reset_position_ids:
|
| 1196 |
+
attention_mask, _ = self._prepare_decoder_attention_mask_training(input_ids1, inputs_embeds, self.eod_token, reset_mask_flag, self.reset_attention_mask, self.reset_position_ids)
|
| 1197 |
+
else:
|
| 1198 |
+
if attention_mask is None:
|
| 1199 |
+
attention_mask = torch.ones(
|
| 1200 |
+
(batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
|
| 1201 |
+
)
|
| 1202 |
+
attention_mask = self._prepare_decoder_attention_mask(
|
| 1203 |
+
attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
|
| 1204 |
+
)
|
| 1205 |
+
|
| 1206 |
+
#rotary_pos_emb = self.rotary_pos_emb(self.max_position_embeddings)
|
| 1207 |
+
# Rotary positional embeddings (embedding is None for PP intermediate devices)
|
| 1208 |
+
rotary_pos_emb = None
|
| 1209 |
+
rotary_pos_emb = self.rotary_pos_emb(self.max_position_embeddings)
|
| 1210 |
+
|
| 1211 |
+
hidden_states = inputs_embeds
|
| 1212 |
+
if self.gradient_checkpointing and self.training:
|
| 1213 |
+
if use_cache:
|
| 1214 |
+
logger.warning_once(
|
| 1215 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 1216 |
+
)
|
| 1217 |
+
use_cache = False
|
| 1218 |
+
|
| 1219 |
+
# decoder layers
|
| 1220 |
+
all_hidden_states = () if output_hidden_states else None
|
| 1221 |
+
all_self_attns = () if output_attentions else None
|
| 1222 |
+
next_decoder_cache = () if use_cache else None
|
| 1223 |
+
#position_ids = position_ids.cpu()
|
| 1224 |
+
#position_ids_k = position_ids_k.cpu()
|
| 1225 |
+
for idx, decoder_layer in enumerate(self.layers):
|
| 1226 |
+
if output_hidden_states:
|
| 1227 |
+
all_hidden_states += (hidden_states,)
|
| 1228 |
+
|
| 1229 |
+
past_key_value = past_key_values[idx] if past_key_values is not None else None
|
| 1230 |
+
|
| 1231 |
+
if self.gradient_checkpointing and self.training:
|
| 1232 |
+
def create_custom_forward(module):
|
| 1233 |
+
def custom_forward(*inputs):
|
| 1234 |
+
# None for past_key_value
|
| 1235 |
+
return module(*inputs, output_attentions, None)
|
| 1236 |
+
|
| 1237 |
+
return custom_forward
|
| 1238 |
+
|
| 1239 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
| 1240 |
+
create_custom_forward(decoder_layer),
|
| 1241 |
+
hidden_states,
|
| 1242 |
+
attention_mask,
|
| 1243 |
+
position_ids,
|
| 1244 |
+
None,
|
| 1245 |
+
)
|
| 1246 |
+
else:
|
| 1247 |
+
layer_outputs = decoder_layer(
|
| 1248 |
+
hidden_states,
|
| 1249 |
+
attention_mask=attention_mask,
|
| 1250 |
+
position_ids=position_ids,
|
| 1251 |
+
position_ids_k=position_ids_k,
|
| 1252 |
+
past_key_value=past_key_value,
|
| 1253 |
+
rotary_pos_emb=rotary_pos_emb,
|
| 1254 |
+
output_attentions=output_attentions,
|
| 1255 |
+
use_cache=use_cache,
|
| 1256 |
+
)
|
| 1257 |
+
hidden_states = layer_outputs[0]
|
| 1258 |
+
|
| 1259 |
+
if use_cache:
|
| 1260 |
+
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
|
| 1261 |
+
|
| 1262 |
+
if output_attentions:
|
| 1263 |
+
all_self_attns += (layer_outputs[1],)
|
| 1264 |
+
hidden_states = hidden_states#.to('cuda:0')
|
| 1265 |
+
#torch.cuda.set_device(hidden_states.device)
|
| 1266 |
+
hidden_states = self.norm(hidden_states)
|
| 1267 |
+
#print(hidden_states)
|
| 1268 |
+
# add hidden states from the last decoder layer
|
| 1269 |
+
if output_hidden_states:
|
| 1270 |
+
all_hidden_states += (hidden_states,)
|
| 1271 |
+
next_cache = next_decoder_cache if use_cache else None
|
| 1272 |
+
if not return_dict:
|
| 1273 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
| 1274 |
+
return BaseModelOutputWithPast(
|
| 1275 |
+
last_hidden_state=hidden_states,
|
| 1276 |
+
past_key_values=next_cache,
|
| 1277 |
+
hidden_states=all_hidden_states,
|
| 1278 |
+
attentions=all_self_attns,
|
| 1279 |
+
)
|
| 1280 |
+
|
| 1281 |
+
|
| 1282 |
+
class YuanForCausalLM(YuanPreTrainedModel, GenerationMixin):
|
| 1283 |
+
def __init__(self, config):
|
| 1284 |
+
super().__init__(config)
|
| 1285 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 1286 |
+
self.model = YuanModel(config)
|
| 1287 |
+
#self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 1288 |
+
self.post_init()
|
| 1289 |
+
|
| 1290 |
+
def get_input_embeddings(self):
|
| 1291 |
+
return self.model.embed_tokens
|
| 1292 |
+
|
| 1293 |
+
def set_input_embeddings(self, value):
|
| 1294 |
+
self.model.embed_tokens = value
|
| 1295 |
+
|
| 1296 |
+
def get_output_embeddings(self):
|
| 1297 |
+
return self.lm_head
|
| 1298 |
+
|
| 1299 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1300 |
+
self.lm_head = new_embeddings
|
| 1301 |
+
|
| 1302 |
+
def set_decoder(self, decoder):
|
| 1303 |
+
self.model = decoder
|
| 1304 |
+
|
| 1305 |
+
def get_decoder(self):
|
| 1306 |
+
return self.model
|
| 1307 |
+
|
| 1308 |
+
def get_loss_mask(self, input_ids, labels, eod_token, sep_token):
|
| 1309 |
+
micro_batch_size, seq_length = input_ids.size()
|
| 1310 |
+
loss_mask = torch.ones(input_ids.size(), dtype=torch.float, device=input_ids.device)
|
| 1311 |
+
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
|
| 1312 |
+
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
|
| 1313 |
+
|
| 1314 |
+
|
| 1315 |
+
"""modify loss_mask to only calculate the loss of the answer (separated with [SEP])"""
|
| 1316 |
+
|
| 1317 |
+
for b in range(micro_batch_size):
|
| 1318 |
+
eod_indexs = position_ids[b, input_ids[b] == eod_token]
|
| 1319 |
+
sep_indexs = position_ids[b, input_ids[b] == sep_token]
|
| 1320 |
+
|
| 1321 |
+
if len(eod_indexs) == 0 or len(sep_indexs) == 0:
|
| 1322 |
+
loss_mask[b] = 1.0
|
| 1323 |
+
else:
|
| 1324 |
+
if eod_indexs[0] > sep_indexs[0]:
|
| 1325 |
+
loss_mask[b, 0:sep_indexs[0]] = 0
|
| 1326 |
+
|
| 1327 |
+
if len(eod_indexs) == len(sep_indexs):
|
| 1328 |
+
for ii, eod_index in enumerate(eod_indexs):
|
| 1329 |
+
start_index = eod_index
|
| 1330 |
+
if ii == (len(sep_indexs) - 1):
|
| 1331 |
+
stop_index = seq_length
|
| 1332 |
+
else:
|
| 1333 |
+
stop_index = sep_indexs[ii + 1]
|
| 1334 |
+
loss_mask[b, start_index:stop_index] = 0.0
|
| 1335 |
+
else:
|
| 1336 |
+
if len(eod_indexs) > len(sep_indexs):
|
| 1337 |
+
loss_mask[b,:] = 1.0
|
| 1338 |
+
else:
|
| 1339 |
+
for ii, eod_index in enumerate(eod_indexs):
|
| 1340 |
+
start_index = eod_index
|
| 1341 |
+
stop_index = sep_indexs[ii + 1]
|
| 1342 |
+
|
| 1343 |
+
loss_mask[b, start_index:stop_index] = 0.0
|
| 1344 |
+
|
| 1345 |
+
elif eod_indexs[0] < sep_indexs[0]:
|
| 1346 |
+
|
| 1347 |
+
if len(eod_indexs) == len(sep_indexs):
|
| 1348 |
+
for ii, eod_index in enumerate(eod_indexs):
|
| 1349 |
+
start_index = eod_index
|
| 1350 |
+
stop_index = sep_indexs[ii]
|
| 1351 |
+
loss_mask[b, start_index:stop_index] = 0.0
|
| 1352 |
+
|
| 1353 |
+
else:
|
| 1354 |
+
if len(eod_indexs) < len(sep_indexs):
|
| 1355 |
+
loss_mask[b,:] = 1.0
|
| 1356 |
+
else:
|
| 1357 |
+
for ii, eod_index in enumerate(eod_indexs):
|
| 1358 |
+
start_index = eod_index
|
| 1359 |
+
if ii >= len(sep_indexs):
|
| 1360 |
+
stop_index = seq_length
|
| 1361 |
+
else:
|
| 1362 |
+
stop_index = sep_indexs[ii]
|
| 1363 |
+
loss_mask[b, start_index:stop_index] = 0.0
|
| 1364 |
+
|
| 1365 |
+
loss_mask[input_ids == eod_token] = 1.0
|
| 1366 |
+
return loss_mask
|
| 1367 |
+
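In the simplest alternating question/answer case, `get_loss_mask` trains only on answer spans: positions before the first [SEP] and positions from an EOD up to (but not including) the next [SEP] are zeroed, and the EOD token itself is re-enabled at the end. A small illustration with hypothetical ids `sep_token = 3` and `eod_token = 2` (the real ids come from the config):

```python
# question tokens, [SEP]=3, answer tokens, <eod>=2, then a second turn
tokens    = [11, 12, 3, 21, 22, 2, 13, 14, 3, 23, 2]
loss_mask = [ 0,  0, 1,  1,  1, 1,  0,  0, 1,  1, 1]
# question positions carry no loss; the [SEP], the answer tokens and the EOD token
# keep weight 1, so only the answers contribute to training.
```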
@add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
|
| 1368 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| 1369 |
+
def forward(
|
| 1370 |
+
self,
|
| 1371 |
+
input_ids: torch.LongTensor = None,
|
| 1372 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1373 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1374 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1375 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1376 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1377 |
+
use_cache: Optional[bool] = None,
|
| 1378 |
+
output_attentions: Optional[bool] = None,
|
| 1379 |
+
output_hidden_states: Optional[bool] = None,
|
| 1380 |
+
return_dict: Optional[bool] = None,
|
| 1381 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 1382 |
+
"""
|
| 1383 |
+
## modify delete routers
|
| 1384 |
+
Args:
|
| 1385 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1386 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 1387 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 1388 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 1389 |
+
|
| 1390 |
+
Returns:
|
| 1391 |
+
|
| 1392 |
+
Example:
|
| 1393 |
+
|
| 1394 |
+
```python
|
| 1395 |
+
>>> from transformers import AutoTokenizer, YuanForCausalLM
|
| 1396 |
+
|
| 1397 |
+
>>> model = YuanForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
|
| 1398 |
+
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
|
| 1399 |
+
|
| 1400 |
+
>>> prompt = "Hey, are you consciours? Can you talk to me?"
|
| 1401 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
| 1402 |
+
|
| 1403 |
+
>>> # Generate
|
| 1404 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 1405 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 1406 |
+
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
|
| 1407 |
+
```"""
|
| 1408 |
+
|
| 1409 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1410 |
+
|
| 1411 |
+
output_hidden_states = (
|
| 1412 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1413 |
+
)
|
| 1414 |
+
|
| 1415 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1416 |
+
|
| 1417 |
+
outputs = self.model(
|
| 1418 |
+
input_ids=input_ids,
|
| 1419 |
+
attention_mask=attention_mask,
|
| 1420 |
+
position_ids=position_ids,
|
| 1421 |
+
past_key_values=past_key_values,
|
| 1422 |
+
inputs_embeds=inputs_embeds,
|
| 1423 |
+
use_cache=use_cache,
|
| 1424 |
+
output_attentions=output_attentions,
|
| 1425 |
+
output_hidden_states=output_hidden_states,
|
| 1426 |
+
return_dict=return_dict,
|
| 1427 |
+
)
|
| 1428 |
+
hidden_states = outputs[0].transpose(0,1)
|
| 1429 |
+
#print(hidden_states)
|
| 1430 |
+
logits = self.lm_head(hidden_states)
|
| 1431 |
+
|
| 1432 |
+
loss = None
|
| 1433 |
+
if labels is not None:
|
| 1434 |
+
if self.use_loss_mask:
|
| 1435 |
+
loss_mask = self.get_loss_mask(input_ids, labels, self.eod_token, self.sep_token)
|
| 1436 |
+
# Shift so that tokens < n predict n
|
| 1437 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 1438 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 1439 |
+
# Flatten the tokens
|
| 1440 |
+
if self.use_loss_mask:
|
| 1441 |
+
loss_fct = CrossEntropyLoss(reduction='none')
|
| 1442 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| 1443 |
+
shift_labels = shift_labels.view(-1)
|
| 1444 |
+
# Enable model parallelism
|
| 1445 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 1446 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 1447 |
+
loss = torch.sum(loss * loss_mask) / loss_mask.sum()
|
| 1448 |
+
else:
|
| 1449 |
+
loss_fct = CrossEntropyLoss()
|
| 1450 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| 1451 |
+
shift_labels = shift_labels.view(-1)
|
| 1452 |
+
# Enable model parallelism
|
| 1453 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 1454 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 1455 |
+
if not return_dict:
|
| 1456 |
+
output = (logits,) + outputs[1:]
|
| 1457 |
+
return (loss,) + output if loss is not None else output
|
| 1458 |
+
|
| 1459 |
+
return CausalLMOutputWithPast(
|
| 1460 |
+
loss=loss,
|
| 1461 |
+
logits=logits,
|
| 1462 |
+
past_key_values=outputs.past_key_values,
|
| 1463 |
+
hidden_states=hidden_states,
|
| 1464 |
+
attentions=outputs.attentions,
|
| 1465 |
+
)
|
| 1466 |
+
|
| 1467 |
+
def prepare_inputs_for_generation(
|
| 1468 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| 1469 |
+
):
|
| 1470 |
+
|
| 1471 |
+
position_ids = kwargs.get("position_ids", None)
|
| 1472 |
+
if attention_mask is not None and position_ids is None:
|
| 1473 |
+
# create position_ids on the fly for batch generation
|
| 1474 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 1475 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 1476 |
+
if past_key_values:
|
| 1477 |
+
position_ids = position_ids[:, -1].unsqueeze(-1)
|
| 1478 |
+
|
| 1479 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 1480 |
+
if inputs_embeds is not None and past_key_values is None:
|
| 1481 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 1482 |
+
else:
|
| 1483 |
+
model_inputs = {"input_ids": input_ids}
|
| 1484 |
+
|
| 1485 |
+
model_inputs.update(
|
| 1486 |
+
{
|
| 1487 |
+
"position_ids": position_ids,
|
| 1488 |
+
"past_key_values": past_key_values,
|
| 1489 |
+
"use_cache": kwargs.get("use_cache"),
|
| 1490 |
+
"attention_mask": attention_mask,
|
| 1491 |
+
}
|
| 1492 |
+
)
|
| 1493 |
+
return model_inputs
|
| 1494 |
+
|
| 1495 |
+
@staticmethod
|
| 1496 |
+
def _reorder_cache(past_key_values, beam_idx):
|
| 1497 |
+
reordered_past = ()
|
| 1498 |
+
for layer_past in past_key_values:
|
| 1499 |
+
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
|
| 1500 |
+
return reordered_past
|
| 1501 |
+
|
| 1502 |
+
|
| 1503 |
+
@add_start_docstrings(
|
| 1504 |
+
"""
|
| 1505 |
+
The Yuan Model transformer with a sequence classification head on top (linear layer).
|
| 1506 |
+
|
| 1507 |
+
[`YuanForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 1508 |
+
(e.g. GPT-2) do.
|
| 1509 |
+
|
| 1510 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
| 1511 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 1512 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 1513 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 1514 |
+
each row of the batch).
|
| 1515 |
+
""",
|
| 1516 |
+
YUAN_START_DOCSTRING,
|
| 1517 |
+
)
|
| 1518 |
+
class YuanForSequenceClassification(YuanPreTrainedModel):
|
| 1519 |
+
#_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
|
| 1520 |
+
|
| 1521 |
+
def __init__(self, config):
|
| 1522 |
+
super().__init__(config)
|
| 1523 |
+
self.num_labels = config.num_labels
|
| 1524 |
+
self.model = YuanModel(config)
|
| 1525 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
| 1526 |
+
|
| 1527 |
+
# Initialize weights and apply final processing
|
| 1528 |
+
self.post_init()
|
| 1529 |
+
|
| 1530 |
+
def get_input_embeddings(self):
|
| 1531 |
+
return self.model.embed_tokens
|
| 1532 |
+
|
| 1533 |
+
def set_input_embeddings(self, value):
|
| 1534 |
+
self.model.embed_tokens = value
|
| 1535 |
+
|
| 1536 |
+
@add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
|
| 1537 |
+
def forward(
|
| 1538 |
+
self,
|
| 1539 |
+
input_ids: torch.LongTensor = None,
|
| 1540 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1541 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1542 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| 1543 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1544 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1545 |
+
use_cache: Optional[bool] = None,
|
| 1546 |
+
output_attentions: Optional[bool] = None,
|
| 1547 |
+
output_hidden_states: Optional[bool] = None,
|
| 1548 |
+
return_dict: Optional[bool] = None,
|
| 1549 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
| 1550 |
+
r"""
|
| 1551 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1552 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1553 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1554 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1555 |
+
"""
|
| 1556 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1557 |
+
transformer_outputs = self.model(
|
| 1558 |
+
input_ids,
|
| 1559 |
+
attention_mask=attention_mask,
|
| 1560 |
+
position_ids=position_ids,
|
| 1561 |
+
past_key_values=past_key_values,
|
| 1562 |
+
inputs_embeds=inputs_embeds,
|
| 1563 |
+
use_cache=use_cache,
|
| 1564 |
+
output_attentions=output_attentions,
|
| 1565 |
+
output_hidden_states=output_hidden_states,
|
| 1566 |
+
return_dict=return_dict,
|
| 1567 |
+
)
|
| 1568 |
+
hidden_states = transformer_outputs[0]
|
| 1569 |
+
logits = self.score(hidden_states)
|
| 1570 |
+
|
| 1571 |
+
if input_ids is not None:
|
| 1572 |
+
batch_size = input_ids.shape[0]
|
| 1573 |
+
else:
|
| 1574 |
+
batch_size = inputs_embeds.shape[0]
|
| 1575 |
+
|
| 1576 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
| 1577 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| 1578 |
+
if self.config.pad_token_id is None:
|
| 1579 |
+
sequence_lengths = -1
|
| 1580 |
+
else:
|
| 1581 |
+
if input_ids is not None:
|
| 1582 |
+
sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
|
| 1583 |
+
else:
|
| 1584 |
+
sequence_lengths = -1
|
| 1585 |
+
|
| 1586 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
| 1587 |
+
|
| 1588 |
+
loss = None
|
| 1589 |
+
if labels is not None:
|
| 1590 |
+
labels = labels.to(logits.device)
|
| 1591 |
+
if self.config.problem_type is None:
|
| 1592 |
+
if self.num_labels == 1:
|
| 1593 |
+
self.config.problem_type = "regression"
|
| 1594 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
| 1595 |
+
self.config.problem_type = "single_label_classification"
|
| 1596 |
+
else:
|
| 1597 |
+
self.config.problem_type = "multi_label_classification"
|
| 1598 |
+
|
| 1599 |
+
if self.config.problem_type == "regression":
|
| 1600 |
+
loss_fct = MSELoss()
|
| 1601 |
+
if self.num_labels == 1:
|
| 1602 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
| 1603 |
+
else:
|
| 1604 |
+
loss = loss_fct(pooled_logits, labels)
|
| 1605 |
+
elif self.config.problem_type == "single_label_classification":
|
| 1606 |
+
loss_fct = CrossEntropyLoss()
|
| 1607 |
+
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
| 1608 |
+
elif self.config.problem_type == "multi_label_classification":
|
| 1609 |
+
loss_fct = BCEWithLogitsLoss()
|
| 1610 |
+
loss = loss_fct(pooled_logits, labels)
|
| 1611 |
+
if not return_dict:
|
| 1612 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
| 1613 |
+
return ((loss,) + output) if loss is not None else output
|
| 1614 |
+
|
| 1615 |
+
return SequenceClassifierOutputWithPast(
|
| 1616 |
+
loss=loss,
|
| 1617 |
+
logits=pooled_logits,
|
| 1618 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 1619 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 1620 |
+
attentions=transformer_outputs.attentions,
|
| 1621 |
+
)
|
| 1622 |
+
|
| 1623 |
+
|
| 1624 |
+
|
modeling_yuanvl_chat.py
ADDED
@@ -0,0 +1,400 @@
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# YuanVL
|
| 3 |
+
# Copyright (c) 2024 OpenGVLab
|
| 4 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 5 |
+
# --------------------------------------------------------
|
| 6 |
+
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional,
|
| 9 |
+
Set, Tuple, Type, TypedDict, Union)
|
| 10 |
+
|
| 11 |
+
import torch.utils.checkpoint
|
| 12 |
+
import transformers
|
| 13 |
+
import torch
|
| 14 |
+
from torch import nn
|
| 15 |
+
from torch.nn import CrossEntropyLoss
|
| 16 |
+
from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
|
| 17 |
+
LlamaTokenizer)
|
| 18 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
| 19 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 20 |
+
from transformers.generation import GenerationMixin
|
| 21 |
+
from transformers.utils import ModelOutput, logging
|
| 22 |
+
|
| 23 |
+
#from transformer_engine.pytorch import RMSNorm
|
| 24 |
+
from transformers.activations import ACT2FN
|
| 25 |
+
|
| 26 |
+
from .configuration_yuanvl import YuanVLChatConfig
|
| 27 |
+
from .conversation import get_conv_template
|
| 28 |
+
from .modeling_intern_vit import InternVisionModel, has_flash_attn
|
| 29 |
+
from .modeling_yuanlm2 import YuanForCausalLM
|
| 30 |
+
from .utils import flatten_bn, merge_multimodal_embeddings
|
| 31 |
+
|
| 32 |
+
logger = logging.get_logger(__name__)
|
| 33 |
+
|
| 34 |
+
class RMSNorm(torch.nn.Module):
|
| 35 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 36 |
+
super().__init__()
|
| 37 |
+
self.weight = torch.nn.Parameter(torch.ones(hidden_size))
|
| 38 |
+
self.variance_epsilon = eps
|
| 39 |
+
|
| 40 |
+
def forward(self, hidden_states):
|
| 41 |
+
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
|
| 42 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 43 |
+
|
| 44 |
+
# convert into half-precision if necessary
|
| 45 |
+
if self.weight.dtype in [torch.float16, torch.bfloat16]:
|
| 46 |
+
hidden_states = hidden_states.to(self.weight.dtype)
|
| 47 |
+
|
| 48 |
+
return self.weight * hidden_states
|
| 49 |
+
|
| 50 |
+
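For reference, the RMSNorm above normalizes by the root-mean-square of the features, `x / sqrt(mean(x^2) + eps)`, then scales by a learned weight. A tiny numeric check (the sizes and values are illustrative only):

```python
import torch

def rmsnorm_example():
    norm = RMSNorm(hidden_size=4, eps=1e-6)       # weight is initialised to ones
    x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    rms = x.pow(2).mean(-1, keepdim=True).sqrt()  # sqrt((1 + 4 + 9 + 16) / 4) ≈ 2.74
    return norm(x), x / rms                       # both ≈ [[0.37, 0.73, 1.10, 1.46]]
```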
class InternVLImagePixelInputs(TypedDict):
|
| 51 |
+
type: Literal["pixel_values"]
|
| 52 |
+
data: Union[torch.Tensor, List[torch.Tensor]]
|
| 53 |
+
"""
|
| 54 |
+
Shape: `(batch_size, 1 + num_patches, num_channels, height, width)`
|
| 55 |
+
|
| 56 |
+
Note that `num_patches` may be different for each batch, in which case
|
| 57 |
+
the data is passed as a list instead of a batched tensor.
|
| 58 |
+
"""
|
| 59 |
+
patches_per_image: List[int]
|
| 60 |
+
"""
|
| 61 |
+
List of number of total patches for each image in the batch.
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class InternVLImageEmbeddingInputs(TypedDict):
|
| 66 |
+
type: Literal["image_embeds"]
|
| 67 |
+
data: Any # in vllm vision this is a NestedTensors
|
| 68 |
+
"""
|
| 69 |
+
A tensor of shape `(num_images, total_image_feature_size, hidden_size)`
|
| 70 |
+
or a list of tensors of shape `(total_image_feature_size, hidden_size)`
|
| 71 |
+
|
| 72 |
+
`hidden_size` must match the hidden size of language model backbone.
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
InternVLImageInputs = Union[InternVLImagePixelInputs,
|
| 77 |
+
InternVLImageEmbeddingInputs]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def version_cmp(v1, v2, op='eq'):
|
| 81 |
+
import operator
|
| 82 |
+
|
| 83 |
+
from packaging import version
|
| 84 |
+
op_func = getattr(operator, op)
|
| 85 |
+
return op_func(version.parse(v1), version.parse(v2))
|
| 86 |
+
|
| 87 |
+
class YuanImageMLP(nn.Module):
|
| 88 |
+
|
| 89 |
+
def __init__(
|
| 90 |
+
self,
|
| 91 |
+
hidden_size: int,
|
| 92 |
+
intermediate_size: int,
|
| 93 |
+
output_size: int,
|
| 94 |
+
hidden_act: str,
|
| 95 |
+
) -> None:
|
| 96 |
+
super().__init__()
|
| 97 |
+
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
|
| 98 |
+
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
|
| 99 |
+
self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
|
| 100 |
+
|
| 101 |
+
if hidden_act != "silu":
|
| 102 |
+
raise ValueError(f"Unsupported activation: {hidden_act}. Only silu is supported for now.")
|
| 103 |
+
|
| 104 |
+
self.act_fn = ACT2FN[hidden_act]
|
| 105 |
+
|
| 106 |
+
@torch.compile
|
| 107 |
+
def swiglu(self, y_1, y_2):
|
| 108 |
+
return self.act_fn(y_1) * y_2
|
| 109 |
+
|
| 110 |
+
def forward(self, x):
|
| 111 |
+
x1 = self.up_proj(x)
|
| 112 |
+
x2 = self.gate_proj(x)
|
| 113 |
+
x3 = self.swiglu(x1, x2)
|
| 114 |
+
x = self.down_proj(x3)
|
| 115 |
+
return x
|
| 116 |
+
|
| 117 |
+
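YuanImageMLP is a SwiGLU projector, `down_proj(silu(up_proj(x)) * gate_proj(x))`, that maps ViT features into the language model's hidden size. A shape sketch with hypothetical sizes (the real ones come from `vision_config` and `llm_config`):

```python
import torch

def image_mlp_shape_example():
    mlp = YuanImageMLP(hidden_size=4096, intermediate_size=8192,
                       output_size=3072, hidden_act="silu")
    tokens = torch.randn(256, 1, 4096)   # (image tokens, 1, vit_hidden / downsample_ratio**2)
    out = mlp(tokens)                    # down_proj(silu(up_proj(x)) * gate_proj(x))
    return out.shape                     # torch.Size([256, 1, 3072])
```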
class YuanVLChatModel(PreTrainedModel, GenerationMixin):
|
| 118 |
+
config_class = YuanVLChatConfig
|
| 119 |
+
main_input_name = 'pixel_values'
|
| 120 |
+
base_model_prefix = 'language_model'
|
| 121 |
+
_supports_flash_attn_2 = True
|
| 122 |
+
_no_split_modules = ['InternVisionModel', 'YuanDecoderLayer']
|
| 123 |
+
|
| 124 |
+
def __init__(self, config: YuanVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
|
| 125 |
+
super().__init__(config)
|
| 126 |
+
|
| 127 |
+
assert version_cmp(transformers.__version__, '4.37.0', 'ge')
|
| 128 |
+
image_size = config.force_image_size or config.vision_config.image_size
|
| 129 |
+
patch_size = config.vision_config.patch_size
|
| 130 |
+
self.patch_size = patch_size
|
| 131 |
+
self.select_layer = config.select_layer
|
| 132 |
+
self.template = config.template
|
| 133 |
+
self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
|
| 134 |
+
self.downsample_ratio = config.downsample_ratio
|
| 135 |
+
self.ps_version = config.ps_version
|
| 136 |
+
use_flash_attn = use_flash_attn if has_flash_attn else False
|
| 137 |
+
config.vision_config.use_flash_attn = True if use_flash_attn else False
|
| 138 |
+
config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
|
| 139 |
+
|
| 140 |
+
logger.info(f'num_image_token: {self.num_image_token}')
|
| 141 |
+
logger.info(f'ps_version: {self.ps_version}')
|
| 142 |
+
if vision_model is not None:
|
| 143 |
+
self.vision_model = vision_model
|
| 144 |
+
else:
|
| 145 |
+
self.vision_model = InternVisionModel(config.vision_config)
|
| 146 |
+
if language_model is not None:
|
| 147 |
+
self.language_model = language_model
|
| 148 |
+
else:
|
| 149 |
+
if config.llm_config.architectures[0] == 'YuanForCausalLM':
|
| 150 |
+
self.language_model = YuanForCausalLM(config.llm_config)
|
| 151 |
+
else:
|
| 152 |
+
raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
|
| 153 |
+
|
| 154 |
+
self.pixel_unshuffle = torch.nn.PixelUnshuffle(downscale_factor=2)
|
| 155 |
+
layernorm_epsilon = config.llm_config.rms_norm_eps
|
| 156 |
+
|
| 157 |
+
self.imagemlp_input_hiddensize = int(config.vision_config.hidden_size / self.downsample_ratio ** 2)
|
| 158 |
+
self.imagemlp_ffn_hidden_size = config.llm_config.ffn_hidden_size
|
| 159 |
+
|
| 160 |
+
self.imagemlp = YuanImageMLP(self.imagemlp_input_hiddensize, self.imagemlp_ffn_hidden_size,
|
| 161 |
+
output_size=config.llm_config.hidden_size, hidden_act="silu")
|
| 162 |
+
self.imagemlp_layernorm = RMSNorm(config.llm_config.hidden_size, eps=layernorm_epsilon)
|
| 163 |
+
|
| 164 |
+
self.img_context_token_id = config.img_context_token_id
|
| 165 |
+
self.conv_template = get_conv_template(self.template)
|
| 166 |
+
self.system_message = self.conv_template.system_message
|
| 167 |
+
|
| 168 |
+
def _validate_pixel_values(self,
|
| 169 |
+
data: Union[torch.Tensor, List[torch.Tensor]]
|
| 170 |
+
) -> Union[torch.Tensor, List[torch.Tensor]]:
|
| 171 |
+
|
| 172 |
+
h = w = self.config.vision_config.image_size
|
| 173 |
+
expected_dims = (3, h, w)
|
| 174 |
+
|
| 175 |
+
def _validate_shape(d: torch.Tensor):
|
| 176 |
+
actual_dims = tuple(d.shape)
|
| 177 |
+
if actual_dims != expected_dims:
|
| 178 |
+
# expected_expr = ("num_patches", *map(str, expected_dims))
|
| 179 |
+
expected_expr = (expected_dims)
|
| 180 |
+
raise ValueError("The expected shape of pixel values in each batch element "
|
| 181 |
+
f" is {expected_expr}. You supplied {tuple(d.shape)}.")
|
| 182 |
+
# `data` may be a single tensor or a List[tensor]
# from this block, the number of image tensors is imbs * num_images
|
| 184 |
+
for d in data:
|
| 185 |
+
_validate_shape(d)
|
| 186 |
+
return data
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def _parse_and_validate_image_input(self,
|
| 191 |
+
pixel_values: List[torch.Tensor] = None,
|
| 192 |
+
image_token_id: torch.Tensor = None,
|
| 193 |
+
image_embeds: torch.Tensor = None,
|
| 194 |
+
) -> Optional[InternVLImagePixelInputs]:
|
| 195 |
+
# no image data provided
|
| 196 |
+
if pixel_values is None and image_embeds is None:
|
| 197 |
+
return None
|
| 198 |
+
|
| 199 |
+
# the inputs include image_embeds
|
| 200 |
+
if image_embeds is not None:
|
| 201 |
+
if not isinstance(image_embeds, torch.Tensor):
|
| 202 |
+
raise ValueError("Incorrect type of image embeddings. "
|
| 203 |
+
f"Got type: {type(image_embeds)}")
|
| 204 |
+
return InternVLImageEmbeddingInputs(
|
| 205 |
+
type="image_embeds",
|
| 206 |
+
data=flatten_bn(image_embeds),
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
#self.img_context_token_id = image_token_id[0]
|
| 210 |
+
if pixel_values is not None:
|
| 211 |
+
if not isinstance(pixel_values, (torch.Tensor, list)):
|
| 212 |
+
raise ValueError("Incorrect type of pixel values. "
|
| 213 |
+
f"Got type: {type(pixel_values)}")
|
| 214 |
+
patches_per_image = []
|
| 215 |
+
# loop over the batch / requests
for request_pixel_values in pixel_values:
# loop over the images of each request
|
| 218 |
+
patches_per_image.append(request_pixel_values.shape[0])
|
| 219 |
+
|
| 220 |
+
# We need to flatten (B, N, P) to (B*N*P)
|
| 221 |
+
# so we call flatten_bn twice.
|
| 222 |
+
# (total_patches, 3, h, w)
|
| 223 |
+
return InternVLImagePixelInputs(
|
| 224 |
+
type="pixel_values",
|
| 225 |
+
data=self._validate_pixel_values(flatten_bn(pixel_values)),
|
| 226 |
+
patches_per_image=patches_per_image)
|
| 227 |
+
raise AssertionError("This line should be unreachable")
|
| 228 |
+
|
| 229 |
+
def _process_image_input(
|
| 230 |
+
self,
|
| 231 |
+
image_input: InternVLImageInputs,
|
| 232 |
+
) -> Tuple[torch.Tensor] :
|
| 233 |
+
if image_input["type"] == "image_embeds":
|
| 234 |
+
return image_input["data"]
|
| 235 |
+
assert self.vision_model is not None
|
| 236 |
+
# (total_patches, tokens_per_image, llm_config.hidden_size)
|
| 237 |
+
image_embeds = self.extract_feature(image_input["data"])
|
| 238 |
+
patches_per_image = image_input["patches_per_image"]
|
| 239 |
+
|
| 240 |
+
# Only one image in the current batch
|
| 241 |
+
# bsz == 1: return image_embeds directly
if len(patches_per_image) == 1:
# return a single tensor of shape [1, num_patches*256, text_config.hidden_size]
|
| 244 |
+
image_embeds = image_embeds.view(-1, self.config.llm_config.hidden_size).unsqueeze(1)
|
| 245 |
+
return image_embeds
|
| 246 |
+
# NOTE: Image embeddings are split into separate tensors for each image
|
| 247 |
+
# by the size of each embedding.
|
| 248 |
+
# feature_size: 256 token positions per patch
|
| 249 |
+
feature_size = image_embeds.shape[1]
|
| 250 |
+
# (total_image_tokens, llm_config.hidden_size)
|
| 251 |
+
image_embeds = image_embeds.view(-1, self.config.llm_config.hidden_size)
|
| 252 |
+
image_feature_sizes = [num_patches * feature_size for num_patches in patches_per_image]
|
| 253 |
+
# after the split we get a tuple; each element is one image's image_embed of shape [num_patches * 256, llm_config.hidden_size]
|
| 254 |
+
image_embeds = image_embeds.split(image_feature_sizes)
|
| 255 |
+
|
| 256 |
+
return image_embeds
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
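A splitting sketch for the multi-image case above: with `patches_per_image = [2, 3]` and 256 feature positions per patch, the flattened embedding tensor is cut into per-image chunks of 512 and 768 rows (the hidden size here is illustrative):

```python
import torch

flat = torch.zeros(5 * 256, 8)          # (total_image_tokens, hidden_size=8)
chunks = flat.split([2 * 256, 3 * 256])
print([c.shape[0] for c in chunks])     # [512, 768]
```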
def get_multimodal_embeddings(self,
|
| 261 |
+
pixel_values: Optional[List[torch.Tensor]] = None,
|
| 262 |
+
image_token_id: Optional[List[torch.Tensor]] = None,
|
| 263 |
+
image_embeds: Optional[List[torch.Tensor]] = None,
|
| 264 |
+
image_input: InternVLImageInputs = None,
|
| 265 |
+
):
|
| 266 |
+
image_input = self._parse_and_validate_image_input(pixel_values, image_token_id, image_embeds)
|
| 267 |
+
if image_input is None:
|
| 268 |
+
return None
|
| 269 |
+
|
| 270 |
+
# image_input: (total_patches, 3, h, w)
|
| 271 |
+
vision_embeddings = self._process_image_input(image_input)
|
| 272 |
+
return vision_embeddings
|
| 273 |
+
|
| 274 |
+
def get_input_embeddings(
|
| 275 |
+
self,
|
| 276 |
+
input_ids: torch.Tensor,
|
| 277 |
+
multimodal_embeddings: Optional[torch.Tensor]
|
| 278 |
+
) -> torch.Tensor:
|
| 279 |
+
# generate the token embeddings
|
| 280 |
+
inputs_embeds = self.language_model.model.get_input_embeddings(input_ids)
|
| 281 |
+
# place the image embeddings at the img_context_token_id positions
|
| 282 |
+
if multimodal_embeddings is not None:
|
| 283 |
+
assert self.img_context_token_id is not None
|
| 284 |
+
# input_ids: torch.Tensor
|
| 285 |
+
# inputs_embeds: torch.Tensor
|
| 286 |
+
# multimodal_embeddings: torch.Tensor
|
| 287 |
+
# placeholder_token_id: img_context_token_id
|
| 288 |
+
inputs_embeds = merge_multimodal_embeddings(
|
| 289 |
+
input_ids, inputs_embeds, multimodal_embeddings,
|
| 290 |
+
self.img_context_token_id)
|
| 291 |
+
return inputs_embeds
|
| 292 |
+
|
| 293 |
+
def forward(
|
| 294 |
+
self,
|
| 295 |
+
input_ids: torch.LongTensor = None,
|
| 296 |
+
attention_mask: torch.Tensor = None,
|
| 297 |
+
position_ids: torch.LongTensor = None,
|
| 298 |
+
past_key_values: List[torch.FloatTensor] = None,
|
| 299 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 300 |
+
labels: Optional[torch.LongTensor] = None,
|
| 301 |
+
use_cache: Optional[bool] = None,
|
| 302 |
+
output_attentions: Optional[bool] = None,
|
| 303 |
+
output_hidden_states: Optional[bool] = None,
|
| 304 |
+
return_dict: Optional[bool] = None,
|
| 305 |
+
pixel_values: Optional[List[torch.Tensor]] = None,
|
| 306 |
+
image_token_id: Optional[List[torch.Tensor]] = None,
|
| 307 |
+
image_embeds: Optional[List[torch.Tensor]] = None,
|
| 308 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
| 309 |
+
|
| 310 |
+
if inputs_embeds is None:
|
| 311 |
+
# (images, patches * token_per_image)
|
| 312 |
+
vision_embeddings = self.get_multimodal_embeddings(pixel_values, image_token_id, image_embeds)
|
| 313 |
+
# (tokens, hidden_size)
|
| 314 |
+
if input_ids is not None:
|
| 315 |
+
vision_embeddings = vision_embeddings.to(input_ids.device)
|
| 316 |
+
inputs_embeds = self.get_input_embeddings(input_ids, vision_embeddings) #.permute(1, 0, 2)
|
| 317 |
+
input_ids = None
|
| 318 |
+
|
| 319 |
+
hidden_states = self.language_model.model(input_ids, attention_mask, position_ids, past_key_values,
|
| 320 |
+
inputs_embeds, labels, use_cache, output_attentions,
|
| 321 |
+
output_hidden_states, return_dict)
|
| 322 |
+
return hidden_states
|
| 323 |
+
|
| 324 |
+
def pixel_shuffle(self, x, scale_factor=0.5):
|
| 325 |
+
n, w, h, c = x.size()
|
| 326 |
+
# N, W, H, C --> N, W, H * scale, C // scale
|
| 327 |
+
x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
|
| 328 |
+
# N, W, H * scale, C // scale --> N, H * scale, W, C // scale
|
| 329 |
+
x = x.permute(0, 2, 1, 3).contiguous()
|
| 330 |
+
# N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
|
| 331 |
+
x = x.view(n, int(h * scale_factor), int(w * scale_factor),
|
| 332 |
+
int(c / (scale_factor * scale_factor)))
|
| 333 |
+
if self.ps_version == 'v1':
|
| 334 |
+
warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
|
| 335 |
+
'which results in a transposed image.')
|
| 336 |
+
else:
|
| 337 |
+
x = x.permute(0, 2, 1, 3).contiguous()
|
| 338 |
+
return x
|
| 339 |
+
|
| 340 |
+
# Internvl vision
|
| 341 |
+
def extract_feature(self, pixel_values):
|
| 342 |
+
# pixel_values: (imbs * num_image, ic, ih, iw)
|
| 343 |
+
pixel_values = pixel_values.to(torch.bfloat16)
|
| 344 |
+
output = self.vision_model(pixel_values=pixel_values)
|
| 345 |
+
vit_embeds=output[0]
|
| 346 |
+
# vit_embeds: (imbs * num_images, h*w, vit_dim)
|
| 347 |
+
vit_embeds = vit_embeds[:, 1:, :]
|
| 348 |
+
|
| 349 |
+
pn, phw, pc = vit_embeds.shape
|
| 350 |
+
ph = pw = int(phw**0.5)
|
| 351 |
+
vit_embeds = vit_embeds.view(pn, ph, pw, pc).permute(0, 3, 1, 2)
|
| 352 |
+
vit_embeds = self.pixel_unshuffle(vit_embeds)
|
| 353 |
+
pn, pc, ph, pw = vit_embeds.shape
|
| 354 |
+
vit_embeds = vit_embeds.view(pn, pc, ph * pw).permute(0, 2, 1)
|
| 355 |
+
num_images, cvs, chs = vit_embeds.shape
|
| 356 |
+
#_, cvs, chs = vit_embeds.shape
|
| 357 |
+
#assert self.imagemlp_ffn_hidden_size == chs
|
| 358 |
+
#vit_embeds = vit_embeds.contiguous().view(imbs, num_image * cvs, chs).permute(1, 0, 2).contiguous()
|
| 359 |
+
vit_embeds = vit_embeds.reshape(1, -1, vit_embeds.shape[-1]).permute(1, 0, 2)
|
| 360 |
+
vit_embeds = self.imagemlp(vit_embeds)
|
| 361 |
+
vit_embeds = self.imagemlp_layernorm(vit_embeds)
|
| 362 |
+
vit_embeds = vit_embeds.view(num_images, cvs, -1)
|
| 363 |
+
return vit_embeds
|
| 364 |
+
|
| 365 |
+
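Putting the numbers together for one tile, assuming the usual InternViT-448 setup (`image_size=448`, `patch_size=14`) and the `downsample_ratio=0.5` used for `num_image_token` in `__init__` (the actual values come from the config):

```python
patches_per_side = 448 // 14              # 32
vit_tokens = patches_per_side ** 2        # 1024 patch tokens (the CLS token is dropped above)
llm_tokens = int(vit_tokens * 0.5 ** 2)   # 256 language-model tokens per tile after pixel unshuffle
```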
@torch.no_grad()
|
| 366 |
+
def generate(
|
| 367 |
+
self,
|
| 368 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
| 369 |
+
input_ids: Optional[torch.FloatTensor] = None,
|
| 370 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 371 |
+
visual_features: Optional[torch.FloatTensor] = None,
|
| 372 |
+
generation_config: Optional[GenerationConfig] = None,
|
| 373 |
+
position_ids: Optional[torch.Tensor] = None,
|
| 374 |
+
output_hidden_states: Optional[bool] = None,
|
| 375 |
+
) -> torch.LongTensor:
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
if pixel_values is not None:
|
| 379 |
+
if visual_features is not None:
|
| 380 |
+
vit_embeds = visual_features
|
| 381 |
+
else:
|
| 382 |
+
vit_embeds = self.get_multimodal_embeddings(pixel_values)
|
| 383 |
+
if input_ids is not None:
|
| 384 |
+
vit_embeds = vit_embeds.to(input_ids.device)
|
| 385 |
+
inputs_embeds = self.get_input_embeddings(input_ids, vit_embeds)
|
| 386 |
+
input_ids = None
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
outputs = self.language_model.generate(
|
| 390 |
+
inputs_embeds=inputs_embeds,
|
| 391 |
+
attention_mask=attention_mask,
|
| 392 |
+
generation_config=generation_config,
|
| 393 |
+
output_hidden_states=output_hidden_states,
|
| 394 |
+
position_ids=position_ids,
|
| 395 |
+
max_length=8192,
|
| 396 |
+
use_cache=True,
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
return outputs
|
mq_test_demo.py
ADDED
@@ -0,0 +1,576 @@
| 1 |
+
import torch
|
| 2 |
+
from transformers import AutoModel, AutoTokenizer
|
| 3 |
+
from PIL import Image
|
| 4 |
+
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
|
| 5 |
+
CenterCrop, ColorJitter, Grayscale
|
| 6 |
+
import math
|
| 7 |
+
|
| 8 |
+
FILE_EXTENSIONS = ('.jpeg', '.txt', '.idx')
|
| 9 |
+
'''
|
| 10 |
+
args = {
|
| 11 |
+
"patch_size": 16,
|
| 12 |
+
"patch_num_width": 16,
|
| 13 |
+
"patch_num_height": 16,
|
| 14 |
+
"position_embedding_length": 4096,
|
| 15 |
+
"clip_model_name": 'InternViT-448',
|
| 16 |
+
"image_segment_method": 'dynamic',
|
| 17 |
+
"max_split_tile_num_multi_image": 1,
|
| 18 |
+
"clip_visual_size": 1024,
|
| 19 |
+
"clip_hidden_size": 1024,
|
| 20 |
+
"downsample_ratio": 0.5
|
| 21 |
+
}
|
| 22 |
+
'''
|
| 23 |
+
class args:
|
| 24 |
+
patch_size = 16
|
| 25 |
+
patch_num_width = 16
|
| 26 |
+
patch_num_height = 16
|
| 27 |
+
position_embedding_length = 4096
|
| 28 |
+
clip_model_name = 'InternViT-448'
|
| 29 |
+
image_segment_method = 'dynamic' ##'adaptive'
|
| 30 |
+
max_split_tile_num_multi_image = 1
|
| 31 |
+
max_split_tile_num_single_image = 9
|
| 32 |
+
clip_visual_size = 1024
|
| 33 |
+
clip_hidden_size = 1024
|
| 34 |
+
downsample_ratio = 0.5
|
| 35 |
+
shape_change_threshold = 0.5
|
| 36 |
+
bf16 = True
|
| 37 |
+
fp16 = False
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size, threshold):
|
| 41 |
+
best_ratio_diff = float('inf')
|
| 42 |
+
best_ratio = (1, 1)
|
| 43 |
+
area = width * height
|
| 44 |
+
for ratio in target_ratios:
|
| 45 |
+
target_aspect_ratio = ratio[0] / ratio[1]
|
| 46 |
+
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
|
| 47 |
+
size_diff_length = abs(((ratio[0]*image_size + ratio[1]*image_size)-(width+height)) / (width+height))
|
| 48 |
+
if ratio_diff < best_ratio_diff and size_diff_length <= threshold:
|
| 49 |
+
best_ratio_diff = ratio_diff
|
| 50 |
+
best_ratio = ratio
|
| 51 |
+
elif ratio_diff == best_ratio_diff:
|
| 52 |
+
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
|
| 53 |
+
best_ratio = ratio
|
| 54 |
+
return best_ratio
|
| 55 |
+
|
| 56 |
+
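A hypothetical call to `find_closest_aspect_ratio`: an 800x400 image (aspect ratio 2.0) checked against a few candidate tilings of 448-pixel tiles, with the 0.5 threshold from `args.shape_change_threshold`:

```python
best = find_closest_aspect_ratio(
    aspect_ratio=2.0,
    target_ratios=[(1, 1), (2, 1), (1, 2)],
    width=800, height=400,
    image_size=448, threshold=0.5,
)
print(best)   # (2, 1): a 2x1 grid of tiles matches the aspect ratio best
```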
def build_transform(input_size):
|
| 57 |
+
#MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
|
| 58 |
+
transform = Compose([
|
| 59 |
+
Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
|
| 60 |
+
_convert_to_rgb,
|
| 61 |
+
ToTensor(),
|
| 62 |
+
Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
|
| 63 |
+
])
|
| 64 |
+
return transform
|
| 65 |
+
|
| 66 |
+
def torch_extract_patches(image_tensor, patch_height, patch_width):
|
| 67 |
+
PATCH_SIZE = args.patch_size
|
| 68 |
+
PATCH_NUM_WIDTH = args.patch_num_width
|
| 69 |
+
PATCH_NUM_HEIGHT = args.patch_num_height
|
| 70 |
+
POSITION_EMBEDDING_LENGTH = args.position_embedding_length
|
| 71 |
+
print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
|
| 72 |
+
# 576
|
| 73 |
+
MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
|
| 74 |
+
#
|
| 75 |
+
TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
|
| 76 |
+
# 336 336
|
| 77 |
+
IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
|
| 78 |
+
IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
|
| 79 |
+
image_tensor = image_tensor.unsqueeze(0)
|
| 80 |
+
patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
|
| 81 |
+
patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
|
| 82 |
+
patches = patches.permute(0, 4, 2, 3, 1).reshape(
|
| 83 |
+
image_tensor.size(2) // patch_height,
|
| 84 |
+
image_tensor.size(3) // patch_width,
|
| 85 |
+
image_tensor.size(1) * patch_height * patch_width,
|
| 86 |
+
)
|
| 87 |
+
return patches.unsqueeze(0)
|
| 88 |
+
|
| 89 |
+
# compute the input image size required for the adapt step
def adapt_size(originHeight:int,originWeight:int):
### compute the adapted image size
# Arguments:
# originHeight: height of the original image
# originWidth: width of the original image
# patchHeight: patch height
# patchWidth: patch width
# maxPatches: upper bound on the number of patches
# Returns:
# resized_height: image height after resizing (interpolation)
# resized_width: image width after resizing (interpolation)
# resized_patch_height_num: number of vertical patches after resizing
# resized_patch_width_num: number of horizontal patches after resizing
|
| 103 |
+
PATCH_SIZE = args.patch_size
|
| 104 |
+
PATCH_NUM_WIDTH = args.patch_num_width
|
| 105 |
+
PATCH_NUM_HEIGHT = args.patch_num_height
|
| 106 |
+
POSITION_EMBEDDING_LENGTH = args.position_embedding_length
|
| 107 |
+
print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
|
| 108 |
+
# 576
|
| 109 |
+
MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
|
| 110 |
+
#
|
| 111 |
+
TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
|
| 112 |
+
# 336 336
|
| 113 |
+
IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
|
| 114 |
+
IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
|
| 115 |
+
patchHeight = PATCH_SIZE
|
| 116 |
+
patchWidth = PATCH_SIZE
|
| 117 |
+
maxPatches = MAX_PATCHES
|
| 118 |
+
scale = math.sqrt(maxPatches * (patchHeight / originHeight) * (patchWidth / originWeight))
|
| 119 |
+
resized_patch_height_num = max(min(math.floor(scale * originHeight / patchHeight), maxPatches), 1)
|
| 120 |
+
resized_patch_width_num = max(min(math.floor(scale * originWeight / patchWidth), maxPatches), 1)
|
| 121 |
+
resized_height = max(resized_patch_height_num * PATCH_SIZE, 1)
|
| 122 |
+
resized_width = max(resized_patch_width_num * PATCH_SIZE, 1)
|
| 123 |
+
return resized_height, resized_width, resized_patch_height_num, resized_patch_width_num
|
| 124 |
+
|
| 125 |
+
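A quick worked example of `adapt_size` under the settings in `args` (`patch_size=16`, a 16x16 = 256 patch budget); the 800x600 input is only an illustration:

```python
# scale = sqrt(256 * (16/800) * (16/600)) ≈ 0.3695
# -> 18 vertical and 13 horizontal patches (18 * 13 = 234 <= 256)
resized_h, resized_w, ph_num, pw_num = adapt_size(800, 600)
print(resized_h, resized_w, ph_num, pw_num)   # 288 208 18 13
```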
def cal_num_of_slices(origin_image_width, origin_image_height, max_num):
|
| 126 |
+
#import pdb
|
| 127 |
+
#pdb.set_trace()
|
| 128 |
+
PATCH_SIZE = args.patch_size
|
| 129 |
+
PATCH_NUM_WIDTH = args.patch_num_width
|
| 130 |
+
PATCH_NUM_HEIGHT = args.patch_num_height
|
| 131 |
+
POSITION_EMBEDDING_LENGTH = args.position_embedding_length
|
| 132 |
+
print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
|
| 133 |
+
# 576
|
| 134 |
+
MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
|
| 135 |
+
#
|
| 136 |
+
TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
|
| 137 |
+
# 336 336
|
| 138 |
+
IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
|
| 139 |
+
IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
|
| 140 |
+
scale = origin_image_width*origin_image_height/(IMAGE_WIDTH*IMAGE_HEIGHT)
|
| 141 |
+
|
| 142 |
+
scale = math.ceil(scale)
|
| 143 |
+
max_num_img=max_num
|
| 144 |
+
if scale > max_num_img:
|
| 145 |
+
scale = max_num_img
|
| 146 |
+
def factorize(n):
|
| 147 |
+
factors = []
|
| 148 |
+
for i in range(1, n + 1):
|
| 149 |
+
if n % i == 0:
|
| 150 |
+
factors.append((i/(n/i), i, n // i))
|
| 151 |
+
return factors
|
| 152 |
+
numbers = [1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15]
|
| 153 |
+
factor_dict = {}
|
| 154 |
+
for num in numbers:
|
| 155 |
+
factor_dict[num] = factorize(num)
|
| 156 |
+
log_origin_ratio = math.log(origin_image_width/origin_image_height)
|
| 157 |
+
available_ratios = []
|
| 158 |
+
if scale<=2:
|
| 159 |
+
available_ratios = factor_dict[scale] + factor_dict[scale + 1]
|
| 160 |
+
else :
|
| 161 |
+
available_ratios = factor_dict[scale-1] + factor_dict[scale]+factor_dict[scale+1]
|
| 162 |
+
|
| 163 |
+
min_dif = 1000
|
| 164 |
+
best_w = 0
|
| 165 |
+
best_h = 0
|
| 166 |
+
for (r,w_slice,h_slice) in available_ratios:
|
| 167 |
+
log_r = math.log(r)
|
| 168 |
+
if min_dif > abs(log_r - log_origin_ratio):
|
| 169 |
+
min_dif = abs(log_r - log_origin_ratio)
|
| 170 |
+
best_w = w_slice
|
| 171 |
+
best_h = h_slice
|
| 172 |
+
return best_w,best_h
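cal_num_of_slices picks the tiling grid whose width-to-height ratio is closest, in log space, to the image's own aspect ratio, searching the factorizations of scale - 1, scale and scale + 1. A compact re-derivation (best_grid is a hypothetical helper; the area-based computation and clipping of scale is left to the caller):

import math

def best_grid(width, height, scale):
    # Collect (ratio, w_slices, h_slices) candidates from the neighbouring factorizations.
    candidates = []
    for n in ([scale, scale + 1] if scale <= 2 else [scale - 1, scale, scale + 1]):
        for i in range(1, n + 1):
            if n % i == 0:
                candidates.append((i / (n // i), i, n // i))
    target = math.log(width / height)
    _, w, h = min(candidates, key=lambda c: abs(math.log(c[0]) - target))
    return w, h

print(best_grid(2000, 1000, scale=6))  # (3, 2): three tiles across, two down

For a 2000 x 1000 image whose area-based scale has been clipped to 6, the 3 x 2 grid wins because log(3/2) is the candidate ratio nearest log(2).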
|
| 173 |
+
# Slice the image into tiles
|
| 174 |
+
def get_patch_nums(origin_image_width, origin_image_height, max_num):
|
| 175 |
+
# Input: the original image dimensions
|
| 176 |
+
# Returns:
|
| 177 |
+
# slice_w_num: number of patches along the width of each slice
|
| 178 |
+
# slice_h_num: number of patches along the height of each slice
|
| 179 |
+
# abstract_w_num: number of patches along the width of the full (abstract) image
|
| 180 |
+
# abstract_h_num: number of patches along the height of the full (abstract) image
|
| 181 |
+
PATCH_SIZE = args.patch_size
|
| 182 |
+
PATCH_NUM_WIDTH = args.patch_num_width
|
| 183 |
+
PATCH_NUM_HEIGHT = args.patch_num_height
|
| 184 |
+
POSITION_EMBEDDING_LENGTH = args.position_embedding_length
|
| 185 |
+
print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
|
| 186 |
+
# 576
|
| 187 |
+
MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
|
| 188 |
+
#
|
| 189 |
+
TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
|
| 190 |
+
# 336 336
|
| 191 |
+
IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
|
| 192 |
+
IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
|
| 193 |
+
|
| 194 |
+
best_w, best_h = cal_num_of_slices(origin_image_width,origin_image_height, max_num)
|
| 195 |
+
slice_width = origin_image_width//best_w
|
| 196 |
+
slice_height = origin_image_height//best_h
|
| 197 |
+
_,_,slice_h_num,slice_w_num = adapt_size(slice_height,slice_width)
|
| 198 |
+
_,_,abstract_h_num,abstract_w_num = adapt_size(origin_image_height,origin_image_width)
|
| 199 |
+
#print(slice_w_num,slice_h_num,abstract_w_num,abstract_h_num)
|
| 200 |
+
return slice_w_num,slice_h_num,abstract_w_num,abstract_h_num
|
| 201 |
+
|
| 202 |
+
def slice_image(image, max_num):
|
| 203 |
+
|
| 204 |
+
# slice the image according to our slicing principle
|
| 205 |
+
# return an array of slices
|
| 206 |
+
PATCH_SIZE = args.patch_size
|
| 207 |
+
PATCH_NUM_WIDTH = args.patch_num_width
|
| 208 |
+
PATCH_NUM_HEIGHT = args.patch_num_height
|
| 209 |
+
POSITION_EMBEDDING_LENGTH = args.position_embedding_length
|
| 210 |
+
print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
|
| 211 |
+
# 576
|
| 212 |
+
MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
|
| 213 |
+
#
|
| 214 |
+
TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
|
| 215 |
+
# 336 336
|
| 216 |
+
IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
|
| 217 |
+
IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
|
| 218 |
+
|
| 219 |
+
origin_image_width = image.size[0]
|
| 220 |
+
origin_image_height = image.size[1]
|
| 221 |
+
|
| 222 |
+
best_w, best_h = cal_num_of_slices(origin_image_width=origin_image_width, origin_image_height=origin_image_height, max_num=max_num )
|
| 223 |
+
slices = []
|
| 224 |
+
# print(best_w,best_h)
|
| 225 |
+
|
| 226 |
+
for j in range(best_h):
|
| 227 |
+
for i in range(best_w):
|
| 228 |
+
|
| 229 |
+
box = (i * origin_image_width//best_w, j * origin_image_height//best_h, (i + 1) * origin_image_width//best_w, (j + 1) * origin_image_height//best_h)
|
| 230 |
+
# Crop the tile
|
| 231 |
+
region = image.crop(box).convert("RGB")
|
| 232 |
+
# Append the tile to the list
|
| 233 |
+
slices.append(region)
|
| 234 |
+
|
| 235 |
+
return slices
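The crop boxes are laid out row-major over the best_w x best_h grid. Continuing the 2000 x 1000 example with a 3 x 2 grid:

width, height, best_w, best_h = 2000, 1000, 3, 2
boxes = [(i * width // best_w, j * height // best_h,
          (i + 1) * width // best_w, (j + 1) * height // best_h)
         for j in range(best_h) for i in range(best_w)]
print(boxes[0], boxes[-1])  # (0, 0, 666, 500) (1333, 500, 2000, 1000)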
|
| 236 |
+
def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False, threshold=1):
|
| 237 |
+
orig_width, orig_height = image.size
|
| 238 |
+
aspect_ratio = orig_width / orig_height
|
| 239 |
+
|
| 240 |
+
# calculate the existing image aspect ratio
|
| 241 |
+
target_ratios = set(
|
| 242 |
+
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
|
| 243 |
+
i * j <= max_num and i * j >= min_num)
|
| 244 |
+
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
|
| 245 |
+
# find the closest aspect ratio to the target
|
| 246 |
+
target_aspect_ratio = find_closest_aspect_ratio(
|
| 247 |
+
aspect_ratio, target_ratios, orig_width, orig_height, image_size, threshold)
|
| 248 |
+
# calculate the target width and height
|
| 249 |
+
target_width = image_size * target_aspect_ratio[0]
|
| 250 |
+
target_height = image_size * target_aspect_ratio[1]
|
| 251 |
+
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
|
| 252 |
+
|
| 253 |
+
# resize the image
|
| 254 |
+
resized_img = image.resize((target_width, target_height))
|
| 255 |
+
processed_images = []
|
| 256 |
+
for i in range(blocks):
|
| 257 |
+
box = (
|
| 258 |
+
(i % (target_width // image_size)) * image_size,
|
| 259 |
+
(i // (target_width // image_size)) * image_size,
|
| 260 |
+
((i % (target_width // image_size)) + 1) * image_size,
|
| 261 |
+
((i // (target_width // image_size)) + 1) * image_size
|
| 262 |
+
)
|
| 263 |
+
print(box)
|
| 264 |
+
# split the image
|
| 265 |
+
split_img = resized_img.crop(box)
|
| 266 |
+
processed_images.append(split_img)
|
| 267 |
+
assert len(processed_images) == blocks
|
| 268 |
+
if use_thumbnail and len(processed_images) != 1:
|
| 269 |
+
thumbnail_img = image.resize((image_size, image_size))
|
| 270 |
+
processed_images.append(thumbnail_img)
|
| 271 |
+
return processed_images
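dynamic_preprocess crops its resized canvas in the same row-major order, one image_size x image_size tile per block. A sketch of just the box arithmetic, assuming a (2, 1) target aspect ratio at image_size 448:

image_size, ratio_w, ratio_h = 448, 2, 1  # assumed target_aspect_ratio
cols = (image_size * ratio_w) // image_size
for i in range(ratio_w * ratio_h):
    box = ((i % cols) * image_size, (i // cols) * image_size,
           ((i % cols) + 1) * image_size, ((i // cols) + 1) * image_size)
    print(box)  # (0, 0, 448, 448) then (448, 0, 896, 448)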
|
| 272 |
+
|
| 273 |
+
def process_image(image, image_size, max_num):
|
| 274 |
+
PATCH_SIZE = args.patch_size
|
| 275 |
+
PATCH_NUM_WIDTH = args.patch_num_width
|
| 276 |
+
PATCH_NUM_HEIGHT = args.patch_num_height
|
| 277 |
+
POSITION_EMBEDDING_LENGTH = args.position_embedding_length
|
| 278 |
+
print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
|
| 279 |
+
# 576
|
| 280 |
+
MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
|
| 281 |
+
#
|
| 282 |
+
TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
|
| 283 |
+
# 336 336
|
| 284 |
+
IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
|
| 285 |
+
IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
|
| 286 |
+
|
| 287 |
+
origin_image_width = image.size[0]
|
| 288 |
+
origin_image_height = image.size[1]
|
| 289 |
+
image = image.convert("RGB")
|
| 290 |
+
slices = slice_image(image, max_num)
|
| 291 |
+
if len(slices) != 1:
|
| 292 |
+
thumbnail_img = image.resize((image_size, image_size))
|
| 293 |
+
slices.append(thumbnail_img)
|
| 294 |
+
# Compute the image size after resizing
|
| 295 |
+
resized_height, resized_width, resized_patch_height, resized_patch_width = \
|
| 296 |
+
adapt_size(origin_image_height,origin_image_width)
|
| 297 |
+
image = slices[0]
|
| 298 |
+
image_w = image.size[0]
|
| 299 |
+
image_h = image.size[1]
|
| 300 |
+
resized_height, resized_width, resized_patch_height, resized_patch_width = \
|
| 301 |
+
adapt_size(image_h,image_w)
|
| 302 |
+
image = ToTensor()(image)
|
| 303 |
+
|
| 304 |
+
image = torch.nn.functional.interpolate(
|
| 305 |
+
image.unsqueeze(0),
|
| 306 |
+
size=(resized_height, resized_width),
|
| 307 |
+
mode="bilinear",
|
| 308 |
+
align_corners=False,
|
| 309 |
+
antialias=True,
|
| 310 |
+
).squeeze(0)
|
| 311 |
+
# Number of patches that must be masked
|
| 312 |
+
num_patches_to_pad = MAX_PATCHES - resized_patch_height*resized_patch_width
|
| 313 |
+
# print("mask: ", num_patches_to_pad)
|
| 314 |
+
# Split the resized image into patches
|
| 315 |
+
image = torch_extract_patches(image,PATCH_SIZE, PATCH_SIZE)
|
| 316 |
+
image = image.reshape([resized_patch_width*resized_patch_height,TOKEN_LENGTH])
|
| 317 |
+
# Zero-pad the masked part of the image
|
| 318 |
+
image = torch.nn.functional.pad(image, [0, 0, 0, num_patches_to_pad]).float() #torch.Size([196, 768])
|
| 319 |
+
image = image.reshape(PATCH_NUM_WIDTH, PATCH_NUM_HEIGHT, PATCH_SIZE, PATCH_SIZE, 3).permute(0, 2, 1, 3, 4).reshape(IMAGE_WIDTH, IMAGE_HEIGHT, 3).permute(2, 0 ,1)
|
| 320 |
+
#print(image.shape)
|
| 321 |
+
#image = torch.stack(image)
|
| 322 |
+
return slices
|
| 323 |
+
|
| 324 |
+
def _convert_to_rgb(image):
|
| 325 |
+
return image.convert('RGB')
|
| 326 |
+
|
| 327 |
+
def load_image(image_file, input_size=448, max_num=9):
|
| 328 |
+
image = Image.open(image_file).convert('RGB')
|
| 329 |
+
# image.save('seg_imge/'+image_file.split('/')[-1])
|
| 330 |
+
# print(max_num)
|
| 331 |
+
if args.clip_model_name == 'InternViT-448':
|
| 332 |
+
transform = build_transform(input_size=input_size)
|
| 333 |
+
#image_processor = CLIPImageProcessor.from_pretrained(args.clip_download_path)
|
| 334 |
+
#'/mnt/beegfs1/shenqiang/internvit-448/models--InternViT-300M-448px/'args.clip_download_path
|
| 335 |
+
if args.image_segment_method == 'adaptive':
|
| 336 |
+
images_processed = process_image(image, input_size, max_num)
|
| 337 |
+
elif args.image_segment_method == 'dynamic':
|
| 338 |
+
images_processed = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num, threshold=args.shape_change_threshold)
|
| 339 |
+
# pixel_values = [image_processor(images=image, return_tensors='pt').pixel_values.squeeze(0) for image in images_processed]
|
| 340 |
+
pixel_values = [transform(image) for image in images_processed]
|
| 341 |
+
else:
|
| 342 |
+
transform = build_transform(input_size=input_size)
|
| 343 |
+
if args.image_segment_method == 'adaptive':
|
| 344 |
+
images_processed = process_image(image, input_size, max_num)
|
| 345 |
+
elif args.image_segment_method == 'dynamic':
|
| 346 |
+
images_processed = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
|
| 347 |
+
pixel_values = [transform(image) for image in images_processed]
|
| 348 |
+
|
| 349 |
+
pixel_values = torch.stack(pixel_values)
|
| 350 |
+
|
| 351 |
+
return pixel_values
|
| 352 |
+
|
| 353 |
+
def preprocess_input(args, num_token_per_tile, image_path, question):
|
| 354 |
+
image_prompts = ''
|
| 355 |
+
if len(image_path) >= 2:
|
| 356 |
+
image_list = []
|
| 357 |
+
num_tile_per_image_list = []
|
| 358 |
+
for ipath in image_path:
|
| 359 |
+
images = load_image(ipath, max_num=args.max_split_tile_num_multi_image)
|
| 360 |
+
#images = load_image(ipath, max_num=args.max_split_tile_num_multi_image).view(1, -1, 3, 448, 448).cuda()
|
| 361 |
+
num_tile_this_image = len(images)
|
| 362 |
+
num_tile_per_image_list.append(num_tile_this_image)
|
| 363 |
+
image_list.append(images)
|
| 364 |
+
image_prompts = image_prompts + '<IMAGE>' + '<pad>' * num_tile_this_image * num_token_per_tile + '</IMAGE>'
|
| 365 |
+
num_tile_per_image_tensor = torch.Tensor(num_tile_per_image_list).long().cuda()
|
| 366 |
+
image_tensor = torch.cat(image_list, dim=0).view(1, -1, 3, 448, 448).cuda()
|
| 367 |
+
|
| 368 |
+
else:
|
| 369 |
+
#images_tensor = load_image(image_path, max_num=args.max_split_tile_num_single_image).view(1, -1, 3, 448, 448).cuda()
|
| 370 |
+
images = load_image(image_path[0], max_num=args.max_split_tile_num_single_image)
|
| 371 |
+
num_tile_this_image = len(images)
|
| 372 |
+
num_tile_per_image_tensor = torch.Tensor([num_tile_this_image]).long().cuda()
|
| 373 |
+
image_tensor = images.view(1, -1, 3, 448, 448).cuda()
|
| 374 |
+
image_prompts = image_prompts + '<IMAGE>' + '<pad>' * num_tile_this_image * num_token_per_tile + '</IMAGE>'
|
| 375 |
+
|
| 376 |
+
if args.fp16:
|
| 377 |
+
image_tensor = image_tensor.half()
|
| 378 |
+
elif args.bf16:
|
| 379 |
+
image_tensor = image_tensor.bfloat16()
|
| 380 |
+
else:
|
| 381 |
+
image_tensor = image_tensor.float()
|
| 382 |
+
|
| 383 |
+
images_input = {'num_tile_per_image_tensor': num_tile_per_image_tensor,
|
| 384 |
+
'image_tensor': image_tensor}
|
| 385 |
+
|
| 386 |
+
prompts = ['<BOS>' + image_prompts + question[0] + '<sep>']
|
| 387 |
+
|
| 388 |
+
return prompts, images_input
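preprocess_input reserves one <pad> placeholder per visual token inside an <IMAGE> ... </IMAGE> span; those positions are later overwritten with vision features. A sketch of the resulting prompt layout, assuming num_token_per_tile = 256 (for example clip_visual_size 1024 with downsample_ratio 0.5) and one image split into 7 tiles:

num_token_per_tile, num_tiles = 256, 7  # assumed values; the real ones come from args
image_prompt = '<IMAGE>' + '<pad>' * (num_tiles * num_token_per_tile) + '</IMAGE>'
prompt = '<BOS>' + image_prompt + 'Please describe the picture' + '<sep>'
print(prompt.count('<pad>'))  # 1792 placeholder positions for the vision features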
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def _build_yuanvl_attention_mask_and_position_ids(tokenizer, tokens, images_input=None):
|
| 392 |
+
"""Build the attention mask and postition ids for the input tokens."""
|
| 393 |
+
|
| 394 |
+
# Since we are not interested in the loss mask, and reset_attention/reset_position
|
| 395 |
+
# are also False, eod_token is not used, so it is safe to set it to None.
|
| 396 |
+
|
| 397 |
+
bos_token, image_start_token, image_end_token, pad_token, sep_token, eod_token = (tokenizer(tok)['input_ids'][0] for tok in ['<BOS>','<IMAGE>', '</IMAGE>', '<pad>', '<sep>', '<eod>'])
|
| 398 |
+
#eod_token = tokenizer("<eod>")['input_ids'][0]
|
| 399 |
+
|
| 400 |
+
attention_mask, position_ids, image_info = get_ltor_masks_and_position_ids_yuanvl_inference(
|
| 401 |
+
tokens,
|
| 402 |
+
bos_token,
|
| 403 |
+
image_start_token,
|
| 404 |
+
image_end_token,
|
| 405 |
+
eod_token,
|
| 406 |
+
pad_token,
|
| 407 |
+
images_input)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
'''attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
|
| 411 |
+
data=tokens,
|
| 412 |
+
eod_token=None,
|
| 413 |
+
reset_position_ids=False,
|
| 414 |
+
reset_attention_mask=False,
|
| 415 |
+
eod_mask_loss=False)'''
|
| 416 |
+
|
| 417 |
+
return attention_mask, position_ids, image_info
|
| 418 |
+
|
| 419 |
+
def get_ltor_masks_and_position_ids_yuanvl_inference(data,
|
| 420 |
+
bos_token,
|
| 421 |
+
image_start_token,
|
| 422 |
+
image_end_token,
|
| 423 |
+
eod_token,
|
| 424 |
+
pad_token,
|
| 425 |
+
images_input,
|
| 426 |
+
reset_attention_mask=False):
|
| 427 |
+
"""Build masks and position id for left to right model."""
|
| 428 |
+
# Extract batch size and sequence length.
|
| 429 |
+
micro_batch_size, seq_length = data.size()
|
| 430 |
+
assert micro_batch_size == 1, 'yuanvl supports mbs = 1 only'
|
| 431 |
+
|
| 432 |
+
# Attention mask (lower triangular).
|
| 433 |
+
if reset_attention_mask:
|
| 434 |
+
att_mask_batch = micro_batch_size
|
| 435 |
+
else:
|
| 436 |
+
att_mask_batch = 1
|
| 437 |
+
attention_mask = torch.tril(torch.ones(
|
| 438 |
+
(att_mask_batch, seq_length, seq_length), device=data.device)).view(
|
| 439 |
+
att_mask_batch, 1, seq_length, seq_length)
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
# Position ids.
|
| 443 |
+
position_ids = torch.arange(seq_length, dtype=torch.long,
|
| 444 |
+
device=data.device)
|
| 445 |
+
position_ids = position_ids.unsqueeze(0).expand_as(data)
|
| 446 |
+
#input_pad = []
|
| 447 |
+
#image_info = {}
|
| 448 |
+
|
| 449 |
+
#import pdb
|
| 450 |
+
#pdb.set_trace()
|
| 451 |
+
#if torch.distributed.get_rank() == 0:
|
| 452 |
+
|
| 453 |
+
#pdb.set_trace()
|
| 454 |
+
if images_input is not None:
|
| 455 |
+
num_tile_per_image_tensor = images_input['num_tile_per_image_tensor']
|
| 456 |
+
images_tensor = images_input['image_tensor']
|
| 457 |
+
input_pad = []
|
| 458 |
+
image_info = {}
|
| 459 |
+
position_ids_use = torch.zeros(data.shape).to(position_ids)
|
| 460 |
+
for b in range(micro_batch_size):
|
| 461 |
+
bos_index = position_ids[b, data[b] == bos_token]
|
| 462 |
+
pad_index = position_ids[b, data[b] == pad_token]
|
| 463 |
+
image_start_index = position_ids[b, data[b] == image_start_token]
|
| 464 |
+
image_end_index = position_ids[b, data[b] == image_end_token]
|
| 465 |
+
#eod_index = position_ids[b, data[b] == eod_token]
|
| 466 |
+
#assert len(bos_index) == len(eod_index)
|
| 467 |
+
num_image = len(num_tile_per_image_tensor)
|
| 468 |
+
|
| 469 |
+
#num_tile = pad_index.shape[0] // clip_visual_size
|
| 470 |
+
#image_info['num_image'] = num_image
|
| 471 |
+
image_info['num_tile'] = num_tile_per_image_tensor
|
| 472 |
+
#image_info['bos_pos'] = bos_index.tolist()
|
| 473 |
+
image_info['image_start_pos'] = image_start_index.tolist()
|
| 474 |
+
#image_info['image_end_pos'] = image_end_index.tolist()
|
| 475 |
+
|
| 476 |
+
#for j in range(image_index.size()[0]):
|
| 477 |
+
# start_idx = image_index[j]
|
| 478 |
+
# diff = seq_length - start_idx
|
| 479 |
+
# position_ids_use[b][start_idx : ] = torch.arange(diff, dtype=torch.long,
|
| 480 |
+
# device=data.device)
|
| 481 |
+
start_idx = image_end_index[-1]
|
| 482 |
+
diff = seq_length - start_idx
|
| 483 |
+
position_ids_use[b][start_idx : ] = torch.arange(diff, dtype=torch.long,
|
| 484 |
+
device=data.device)
|
| 485 |
+
else:
|
| 486 |
+
position_ids = torch.arange(seq_length, dtype=torch.long,
|
| 487 |
+
device=data.device)
|
| 488 |
+
position_ids = position_ids.unsqueeze(0)#.expand_as(data)
|
| 489 |
+
position_ids_use = position_ids
|
| 490 |
+
image_info = None
|
| 491 |
+
#image_info['eod_pos'] = eod_index.tolist()
|
| 492 |
+
#for j in range(bos_index.size()[0]):
|
| 493 |
+
# start_idx = bos_index[j]
|
| 494 |
+
# end_idx = eod_index[j]
|
| 495 |
+
# input_pad = input_pad + [bos_token] + [pad_token] * clip_visual_size + data[b][start_idx + 1 : end_idx + 1].tolist()
|
| 496 |
+
#data_nopad = data[b][:eod_index[j]+1].view(1, -1)
|
| 497 |
+
#input_pad = input_pad + [pad_token]
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# Position ids.
|
| 501 |
+
#position_ids = torch.arange(seq_length + clip_visual_size * num_image, dtype=torch.long,
|
| 502 |
+
#position_ids = torch.arange(seq_length, dtype=torch.long,
|
| 503 |
+
# device=data.device)
|
| 504 |
+
#position_ids = position_ids.unsqueeze(0)#.expand_as(data)
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
# Convert attention mask to binary:
|
| 509 |
+
attention_mask = (attention_mask < 0.5)
|
| 510 |
+
|
| 511 |
+
'''xattn_position_ids = torch.arange(seq_length, dtype=torch.long,
|
| 512 |
+
device=data.device)
|
| 513 |
+
xattn_position_ids = xattn_position_ids.unsqueeze(0).expand_as(data)
|
| 514 |
+
|
| 515 |
+
for b in range(micro_batch_size):
|
| 516 |
+
|
| 517 |
+
bos_index = xattn_position_ids[b, data[b] == bos_token]
|
| 518 |
+
|
| 519 |
+
num_image = len(bos_index)
|
| 520 |
+
|
| 521 |
+
xattn_mask = torch.zeros((micro_batch_size, seq_length, num_image * clip_visual_size), device = data.device).view(micro_batch_size, 1, seq_length, num_image * clip_visual_size)
|
| 522 |
+
|
| 523 |
+
for j in range(bos_index.size()[0]):
|
| 524 |
+
sidx = bos_index[j]
|
| 525 |
+
|
| 526 |
+
image_sidx = j * clip_visual_size
|
| 527 |
+
image_eidx = (j + 1) * clip_visual_size
|
| 528 |
+
|
| 529 |
+
#xattn_mask[b, 0, (sidx + 1) : , image_sidx : image_eidx] = 1
|
| 530 |
+
xattn_mask[b, 0, sidx : , image_sidx : image_eidx] = 1
|
| 531 |
+
#xattn_mask[b, 0, sidx : (eidx + 1), image_sidx : image_eidx] = 1
|
| 532 |
+
|
| 533 |
+
xattn_mask = (xattn_mask < 0.5)'''
|
| 534 |
+
|
| 535 |
+
return attention_mask, position_ids_use, image_info
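The net effect of the image branch above is that position ids stay at zero across the <IMAGE> ... </IMAGE> span and restart counting from the last </IMAGE> token, while the attention mask remains plain causal. A toy illustration with made-up token ids (6 stands in for </IMAGE>):

import torch

tokens = torch.tensor([[7, 5, 9, 9, 6, 11, 12, 13]])  # <BOS> <IMAGE> <pad> <pad> </IMAGE> text ...
image_end_token = 6
seq_length = tokens.size(1)
position_ids = torch.arange(seq_length).unsqueeze(0)
position_ids_use = torch.zeros_like(position_ids)
start_idx = int(position_ids[0, tokens[0] == image_end_token][-1])
position_ids_use[0, start_idx:] = torch.arange(seq_length - start_idx)
print(position_ids_use)  # tensor([[0, 0, 0, 0, 0, 1, 2, 3]])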
|
| 536 |
+
|
| 537 |
+
tokenizer_loadpath = "/mnt/beegfs3/zhaoxudong/code/yuanvl_hf_40B_stage2_pcase4_12pp/"
|
| 538 |
+
model_loadpath = "/mnt/beegfs3/zhaoxudong/code/yuanvl_hf_40B_stage2_pcase4_12pp/"
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
# Load the model from a local path
|
| 542 |
+
model = AutoModel.from_pretrained(
|
| 543 |
+
model_loadpath,
|
| 544 |
+
torch_dtype=torch.bfloat16,
|
| 545 |
+
low_cpu_mem_usage=True,
|
| 546 |
+
use_flash_attn=False,
|
| 547 |
+
device_map="auto",
|
| 548 |
+
trust_remote_code=True).eval()
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
print("Creat model finish")
|
| 552 |
+
|
| 553 |
+
# Load the tokenizer from a local path
|
| 554 |
+
tokenizer = AutoTokenizer.from_pretrained(tokenizer_loadpath)
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
num_token_per_tile = int(args.clip_visual_size * args.downsample_ratio**2)
|
| 558 |
+
|
| 559 |
+
# demo 1
|
| 560 |
+
image_path = ['/mnt/beegfs3/zhaoxudong/code/image.jpeg']
|
| 561 |
+
question = ['Please describe the picture']
|
| 562 |
+
question = ['请描述这张图片的内容']
|
| 563 |
+
|
| 564 |
+
prompts, images_input = preprocess_input(args, num_token_per_tile, image_path, question)
|
| 565 |
+
|
| 566 |
+
input=tokenizer(prompts, return_tensors="pt")
|
| 567 |
+
input_ids = input['input_ids'].to("cuda")
|
| 568 |
+
pixel_values=images_input['image_tensor']
|
| 569 |
+
|
| 570 |
+
attention_mask, position_ids, image_info = _build_yuanvl_attention_mask_and_position_ids(
|
| 571 |
+
tokenizer, input_ids, images_input)
|
| 572 |
+
|
| 573 |
+
attention_mask = input['attention_mask'].to("cuda")
|
| 574 |
+
|
| 575 |
+
output = model.generate(pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
|
| 576 |
+
print(tokenizer.decode(output[0]))
|
recipe.yaml
ADDED
|
@@ -0,0 +1,11 @@
|
| 1 |
+
default_stage:
|
| 2 |
+
default_modifiers:
|
| 3 |
+
GPTQModifier:
|
| 4 |
+
targets: [Linear]
|
| 5 |
+
ignore: ['re:.*lm_head$', 're:.*qkv$', 're:.*fc1$', 're:.*fc2$', 're:.*attn.proj$',
|
| 6 |
+
're:.*up_proj$', 're:.*gate_proj$', 're:.*down_proj$', 're:.*router.query_key_value$']
|
| 7 |
+
scheme: W4A16
|
| 8 |
+
sequential_update: true
|
| 9 |
+
block_size: 128
|
| 10 |
+
dampening_frac: 0.01
|
| 11 |
+
offload_hessians: false
|
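The recipe above drives a one-shot GPTQ pass that quantizes Linear layers to W4A16 while skipping every module matched by the ignore regexes. A small sketch of how those patterns behave (PyYAML assumed; the module names are illustrative, not read from the checkpoint):

import re
import yaml

with open("recipe.yaml") as f:
    recipe = yaml.safe_load(f)

gptq = recipe["default_stage"]["default_modifiers"]["GPTQModifier"]
patterns = [re.compile(p[3:]) for p in gptq["ignore"] if p.startswith("re:")]

for name in ["model.layers.0.mlp.up_proj",
             "model.layers.0.attention.query_key_value",
             "lm_head"]:
    skipped = any(p.search(name) for p in patterns)
    print(name, "-> ignored" if skipped else "-> quantized to W4A16")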
special_tokens_map.json
ADDED
|
@@ -0,0 +1,1085 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<s>",
|
| 4 |
+
"<eod>",
|
| 5 |
+
"<unk>",
|
| 6 |
+
"<sep>",
|
| 7 |
+
"<pad>",
|
| 8 |
+
"<mask>",
|
| 9 |
+
"<predict>",
|
| 10 |
+
"<FIM_SUFFIX>",
|
| 11 |
+
"<FIM_PREFIX>",
|
| 12 |
+
"<FIM_MIDDLE>",
|
| 13 |
+
"<commit_before>",
|
| 14 |
+
"<commit_msg>",
|
| 15 |
+
"<commit_after>",
|
| 16 |
+
"<jupyter_start>",
|
| 17 |
+
"<jupyter_text>",
|
| 18 |
+
"<jupyter_code>",
|
| 19 |
+
"<jupyter_output>",
|
| 20 |
+
"<empty_output>",
|
| 21 |
+
"<repo_name>",
|
| 22 |
+
"<file_sep>",
|
| 23 |
+
"<BOS>",
|
| 24 |
+
"<IMAGE>",
|
| 25 |
+
"</IMAGE>",
|
| 26 |
+
"<grounding>",
|
| 27 |
+
"<obj>",
|
| 28 |
+
"</obj>",
|
| 29 |
+
"<box>",
|
| 30 |
+
"</box>",
|
| 31 |
+
"<point>",
|
| 32 |
+
"</point>",
|
| 33 |
+
"<3dbox>",
|
| 34 |
+
"</3dbox>",
|
| 35 |
+
"<depth>",
|
| 36 |
+
"</depth>",
|
| 37 |
+
"s000",
|
| 38 |
+
"s001",
|
| 39 |
+
"s002",
|
| 40 |
+
"s003",
|
| 41 |
+
"s004",
|
| 42 |
+
"s005",
|
| 43 |
+
"s006",
|
| 44 |
+
"s007",
|
| 45 |
+
"s008",
|
| 46 |
+
"s009",
|
| 47 |
+
"s010",
|
| 48 |
+
"s011",
|
| 49 |
+
"s012",
|
| 50 |
+
"s013",
|
| 51 |
+
"s014",
|
| 52 |
+
"s015",
|
| 53 |
+
"s016",
|
| 54 |
+
"s017",
|
| 55 |
+
"s018",
|
| 56 |
+
"s019",
|
| 57 |
+
"s020",
|
| 58 |
+
"s021",
|
| 59 |
+
"s022",
|
| 60 |
+
"s023",
|
| 61 |
+
"s024",
|
| 62 |
+
"s025",
|
| 63 |
+
"s026",
|
| 64 |
+
"s027",
|
| 65 |
+
"s028",
|
| 66 |
+
"s029",
|
| 67 |
+
"s030",
|
| 68 |
+
"s031",
|
| 69 |
+
"s032",
|
| 70 |
+
"s033",
|
| 71 |
+
"s034",
|
| 72 |
+
"s035",
|
| 73 |
+
"s036",
|
| 74 |
+
"s037",
|
| 75 |
+
"s038",
|
| 76 |
+
"s039",
|
| 77 |
+
"s040",
|
| 78 |
+
"s041",
|
| 79 |
+
"s042",
|
| 80 |
+
"s043",
|
| 81 |
+
"s044",
|
| 82 |
+
"s045",
|
| 83 |
+
"s046",
|
| 84 |
+
"s047",
|
| 85 |
+
"s048",
|
| 86 |
+
"s049",
|
| 87 |
+
"s050",
|
| 88 |
+
"s051",
|
| 89 |
+
"s052",
|
| 90 |
+
"s053",
|
| 91 |
+
"s054",
|
| 92 |
+
"s055",
|
| 93 |
+
"s056",
|
| 94 |
+
"s057",
|
| 95 |
+
"s058",
|
| 96 |
+
"s059",
|
| 97 |
+
"s060",
|
| 98 |
+
"s061",
|
| 99 |
+
"s062",
|
| 100 |
+
"s063",
|
| 101 |
+
"s064",
|
| 102 |
+
"s065",
|
| 103 |
+
"s066",
|
| 104 |
+
"s067",
|
| 105 |
+
"s068",
|
| 106 |
+
"s069",
|
| 107 |
+
"s070",
|
| 108 |
+
"s071",
|
| 109 |
+
"s072",
|
| 110 |
+
"s073",
|
| 111 |
+
"s074",
|
| 112 |
+
"s075",
|
| 113 |
+
"s076",
|
| 114 |
+
"s077",
|
| 115 |
+
"s078",
|
| 116 |
+
"s079",
|
| 117 |
+
"s080",
|
| 118 |
+
"s081",
|
| 119 |
+
"s082",
|
| 120 |
+
"s083",
|
| 121 |
+
"s084",
|
| 122 |
+
"s085",
|
| 123 |
+
"s086",
|
| 124 |
+
"s087",
|
| 125 |
+
"s088",
|
| 126 |
+
"s089",
|
| 127 |
+
"s090",
|
| 128 |
+
"s091",
|
| 129 |
+
"s092",
|
| 130 |
+
"s093",
|
| 131 |
+
"s094",
|
| 132 |
+
"s095",
|
| 133 |
+
"s096",
|
| 134 |
+
"s097",
|
| 135 |
+
"s098",
|
| 136 |
+
"s099",
|
| 137 |
+
"s100",
|
| 138 |
+
"s101",
|
| 139 |
+
"s102",
|
| 140 |
+
"s103",
|
| 141 |
+
"s104",
|
| 142 |
+
"s105",
|
| 143 |
+
"s106",
|
| 144 |
+
"s107",
|
| 145 |
+
"s108",
|
| 146 |
+
"s109",
|
| 147 |
+
"s110",
|
| 148 |
+
"s111",
|
| 149 |
+
"s112",
|
| 150 |
+
"s113",
|
| 151 |
+
"s114",
|
| 152 |
+
"s115",
|
| 153 |
+
"s116",
|
| 154 |
+
"s117",
|
| 155 |
+
"s118",
|
| 156 |
+
"s119",
|
| 157 |
+
"s120",
|
| 158 |
+
"s121",
|
| 159 |
+
"s122",
|
| 160 |
+
"s123",
|
| 161 |
+
"s124",
|
| 162 |
+
"s125",
|
| 163 |
+
"s126",
|
| 164 |
+
"s127",
|
| 165 |
+
"s128",
|
| 166 |
+
"s129",
|
| 167 |
+
"s130",
|
| 168 |
+
"s131",
|
| 169 |
+
"s132",
|
| 170 |
+
"s133",
|
| 171 |
+
"s134",
|
| 172 |
+
"s135",
|
| 173 |
+
"s136",
|
| 174 |
+
"s137",
|
| 175 |
+
"s138",
|
| 176 |
+
"s139",
|
| 177 |
+
"s140",
|
| 178 |
+
"s141",
|
| 179 |
+
"s142",
|
| 180 |
+
"s143",
|
| 181 |
+
"s144",
|
| 182 |
+
"s145",
|
| 183 |
+
"s146",
|
| 184 |
+
"s147",
|
| 185 |
+
"s148",
|
| 186 |
+
"s149",
|
| 187 |
+
"s150",
|
| 188 |
+
"s151",
|
| 189 |
+
"s152",
|
| 190 |
+
"s153",
|
| 191 |
+
"s154",
|
| 192 |
+
"s155",
|
| 193 |
+
"s156",
|
| 194 |
+
"s157",
|
| 195 |
+
"s158",
|
| 196 |
+
"s159",
|
| 197 |
+
"s160",
|
| 198 |
+
"s161",
|
| 199 |
+
"s162",
|
| 200 |
+
"s163",
|
| 201 |
+
"s164",
|
| 202 |
+
"s165",
|
| 203 |
+
"s166",
|
| 204 |
+
"s167",
|
| 205 |
+
"s168",
|
| 206 |
+
"s169",
|
| 207 |
+
"s170",
|
| 208 |
+
"s171",
|
| 209 |
+
"s172",
|
| 210 |
+
"s173",
|
| 211 |
+
"s174",
|
| 212 |
+
"s175",
|
| 213 |
+
"s176",
|
| 214 |
+
"s177",
|
| 215 |
+
"s178",
|
| 216 |
+
"s179",
|
| 217 |
+
"s180",
|
| 218 |
+
"s181",
|
| 219 |
+
"s182",
|
| 220 |
+
"s183",
|
| 221 |
+
"s184",
|
| 222 |
+
"s185",
|
| 223 |
+
"s186",
|
| 224 |
+
"s187",
|
| 225 |
+
"s188",
|
| 226 |
+
"s189",
|
| 227 |
+
"s190",
|
| 228 |
+
"s191",
|
| 229 |
+
"s192",
|
| 230 |
+
"s193",
|
| 231 |
+
"s194",
|
| 232 |
+
"s195",
|
| 233 |
+
"s196",
|
| 234 |
+
"s197",
|
| 235 |
+
"s198",
|
| 236 |
+
"s199",
|
| 237 |
+
"s200",
|
| 238 |
+
"s201",
|
| 239 |
+
"s202",
|
| 240 |
+
"s203",
|
| 241 |
+
"s204",
|
| 242 |
+
"s205",
|
| 243 |
+
"s206",
|
| 244 |
+
"s207",
|
| 245 |
+
"s208",
|
| 246 |
+
"s209",
|
| 247 |
+
"s210",
|
| 248 |
+
"s211",
|
| 249 |
+
"s212",
|
| 250 |
+
"s213",
|
| 251 |
+
"s214",
|
| 252 |
+
"s215",
|
| 253 |
+
"s216",
|
| 254 |
+
"s217",
|
| 255 |
+
"s218",
|
| 256 |
+
"s219",
|
| 257 |
+
"s220",
|
| 258 |
+
"s221",
|
| 259 |
+
"s222",
|
| 260 |
+
"s223",
|
| 261 |
+
"s224",
|
| 262 |
+
"s225",
|
| 263 |
+
"s226",
|
| 264 |
+
"s227",
|
| 265 |
+
"s228",
|
| 266 |
+
"s229",
|
| 267 |
+
"s230",
|
| 268 |
+
"s231",
|
| 269 |
+
"s232",
|
| 270 |
+
"s233",
|
| 271 |
+
"s234",
|
| 272 |
+
"s235",
|
| 273 |
+
"s236",
|
| 274 |
+
"s237",
|
| 275 |
+
"s238",
|
| 276 |
+
"s239",
|
| 277 |
+
"s240",
|
| 278 |
+
"s241",
|
| 279 |
+
"s242",
|
| 280 |
+
"s243",
|
| 281 |
+
"s244",
|
| 282 |
+
"s245",
|
| 283 |
+
"s246",
|
| 284 |
+
"s247",
|
| 285 |
+
"s248",
|
| 286 |
+
"s249",
|
| 287 |
+
"s250",
|
| 288 |
+
"s251",
|
| 289 |
+
"s252",
|
| 290 |
+
"s253",
|
| 291 |
+
"s254",
|
| 292 |
+
"s255",
|
| 293 |
+
"s256",
|
| 294 |
+
"s257",
|
| 295 |
+
"s258",
|
| 296 |
+
"s259",
|
| 297 |
+
"s260",
|
| 298 |
+
"s261",
|
| 299 |
+
"s262",
|
| 300 |
+
"s263",
|
| 301 |
+
"s264",
|
| 302 |
+
"s265",
|
| 303 |
+
"s266",
|
| 304 |
+
"s267",
|
| 305 |
+
"s268",
|
| 306 |
+
"s269",
|
| 307 |
+
"s270",
|
| 308 |
+
"s271",
|
| 309 |
+
"s272",
|
| 310 |
+
"s273",
|
| 311 |
+
"s274",
|
| 312 |
+
"s275",
|
| 313 |
+
"s276",
|
| 314 |
+
"s277",
|
| 315 |
+
"s278",
|
| 316 |
+
"s279",
|
| 317 |
+
"s280",
|
| 318 |
+
"s281",
|
| 319 |
+
"s282",
|
| 320 |
+
"s283",
|
| 321 |
+
"s284",
|
| 322 |
+
"s285",
|
| 323 |
+
"s286",
|
| 324 |
+
"s287",
|
| 325 |
+
"s288",
|
| 326 |
+
"s289",
|
| 327 |
+
"s290",
|
| 328 |
+
"s291",
|
| 329 |
+
"s292",
|
| 330 |
+
"s293",
|
| 331 |
+
"s294",
|
| 332 |
+
"s295",
|
| 333 |
+
"s296",
|
| 334 |
+
"s297",
|
| 335 |
+
"s298",
|
| 336 |
+
"s299",
|
| 337 |
+
"s300",
|
| 338 |
+
"s301",
|
| 339 |
+
"s302",
|
| 340 |
+
"s303",
|
| 341 |
+
"s304",
|
| 342 |
+
"s305",
|
| 343 |
+
"s306",
|
| 344 |
+
"s307",
|
| 345 |
+
"s308",
|
| 346 |
+
"s309",
|
| 347 |
+
"s310",
|
| 348 |
+
"s311",
|
| 349 |
+
"s312",
|
| 350 |
+
"s313",
|
| 351 |
+
"s314",
|
| 352 |
+
"s315",
|
| 353 |
+
"s316",
|
| 354 |
+
"s317",
|
| 355 |
+
"s318",
|
| 356 |
+
"s319",
|
| 357 |
+
"s320",
|
| 358 |
+
"s321",
|
| 359 |
+
"s322",
|
| 360 |
+
"s323",
|
| 361 |
+
"s324",
|
| 362 |
+
"s325",
|
| 363 |
+
"s326",
|
| 364 |
+
"s327",
|
| 365 |
+
"s328",
|
| 366 |
+
"s329",
|
| 367 |
+
"s330",
|
| 368 |
+
"s331",
|
| 369 |
+
"s332",
|
| 370 |
+
"s333",
|
| 371 |
+
"s334",
|
| 372 |
+
"s335",
|
| 373 |
+
"s336",
|
| 374 |
+
"s337",
|
| 375 |
+
"s338",
|
| 376 |
+
"s339",
|
| 377 |
+
"s340",
|
| 378 |
+
"s341",
|
| 379 |
+
"s342",
|
| 380 |
+
"s343",
|
| 381 |
+
"s344",
|
| 382 |
+
"s345",
|
| 383 |
+
"s346",
|
| 384 |
+
"s347",
|
| 385 |
+
"s348",
|
| 386 |
+
"s349",
|
| 387 |
+
"s350",
|
| 388 |
+
"s351",
|
| 389 |
+
"s352",
|
| 390 |
+
"s353",
|
| 391 |
+
"s354",
|
| 392 |
+
"s355",
|
| 393 |
+
"s356",
|
| 394 |
+
"s357",
|
| 395 |
+
"s358",
|
| 396 |
+
"s359",
|
| 397 |
+
"s360",
|
| 398 |
+
"s361",
|
| 399 |
+
"s362",
|
| 400 |
+
"s363",
|
| 401 |
+
"s364",
|
| 402 |
+
"s365",
|
| 403 |
+
"s366",
|
| 404 |
+
"s367",
|
| 405 |
+
"s368",
|
| 406 |
+
"s369",
|
| 407 |
+
"s370",
|
| 408 |
+
"s371",
|
| 409 |
+
"s372",
|
| 410 |
+
"s373",
|
| 411 |
+
"s374",
|
| 412 |
+
"s375",
|
| 413 |
+
"s376",
|
| 414 |
+
"s377",
|
| 415 |
+
"s378",
|
| 416 |
+
"s379",
|
| 417 |
+
"s380",
|
| 418 |
+
"s381",
|
| 419 |
+
"s382",
|
| 420 |
+
"s383",
|
| 421 |
+
"s384",
|
| 422 |
+
"s385",
|
| 423 |
+
"s386",
|
| 424 |
+
"s387",
|
| 425 |
+
"s388",
|
| 426 |
+
"s389",
|
| 427 |
+
"s390",
|
| 428 |
+
"s391",
|
| 429 |
+
"s392",
|
| 430 |
+
"s393",
|
| 431 |
+
"s394",
|
| 432 |
+
"s395",
|
| 433 |
+
"s396",
|
| 434 |
+
"s397",
|
| 435 |
+
"s398",
|
| 436 |
+
"s399",
|
| 437 |
+
"s400",
|
| 438 |
+
"s401",
|
| 439 |
+
"s402",
|
| 440 |
+
"s403",
|
| 441 |
+
"s404",
|
| 442 |
+
"s405",
|
| 443 |
+
"s406",
|
| 444 |
+
"s407",
|
| 445 |
+
"s408",
|
| 446 |
+
"s409",
|
| 447 |
+
"s410",
|
| 448 |
+
"s411",
|
| 449 |
+
"s412",
|
| 450 |
+
"s413",
|
| 451 |
+
"s414",
|
| 452 |
+
"s415",
|
| 453 |
+
"s416",
|
| 454 |
+
"s417",
|
| 455 |
+
"s418",
|
| 456 |
+
"s419",
|
| 457 |
+
"s420",
|
| 458 |
+
"s421",
|
| 459 |
+
"s422",
|
| 460 |
+
"s423",
|
| 461 |
+
"s424",
|
| 462 |
+
"s425",
|
| 463 |
+
"s426",
|
| 464 |
+
"s427",
|
| 465 |
+
"s428",
|
| 466 |
+
"s429",
|
| 467 |
+
"s430",
|
| 468 |
+
"s431",
|
| 469 |
+
"s432",
|
| 470 |
+
"s433",
|
| 471 |
+
"s434",
|
| 472 |
+
"s435",
|
| 473 |
+
"s436",
|
| 474 |
+
"s437",
|
| 475 |
+
"s438",
|
| 476 |
+
"s439",
|
| 477 |
+
"s440",
|
| 478 |
+
"s441",
|
| 479 |
+
"s442",
|
| 480 |
+
"s443",
|
| 481 |
+
"s444",
|
| 482 |
+
"s445",
|
| 483 |
+
"s446",
|
| 484 |
+
"s447",
|
| 485 |
+
"s448",
|
| 486 |
+
"s449",
|
| 487 |
+
"s450",
|
| 488 |
+
"s451",
|
| 489 |
+
"s452",
|
| 490 |
+
"s453",
|
| 491 |
+
"s454",
|
| 492 |
+
"s455",
|
| 493 |
+
"s456",
|
| 494 |
+
"s457",
|
| 495 |
+
"s458",
|
| 496 |
+
"s459",
|
| 497 |
+
"s460",
|
| 498 |
+
"s461",
|
| 499 |
+
"s462",
|
| 500 |
+
"s463",
|
| 501 |
+
"s464",
|
| 502 |
+
"s465",
|
| 503 |
+
"s466",
|
| 504 |
+
"s467",
|
| 505 |
+
"s468",
|
| 506 |
+
"s469",
|
| 507 |
+
"s470",
|
| 508 |
+
"s471",
|
| 509 |
+
"s472",
|
| 510 |
+
"s473",
|
| 511 |
+
"s474",
|
| 512 |
+
"s475",
|
| 513 |
+
"s476",
|
| 514 |
+
"s477",
|
| 515 |
+
"s478",
|
| 516 |
+
"s479",
|
| 517 |
+
"s480",
|
| 518 |
+
"s481",
|
| 519 |
+
"s482",
|
| 520 |
+
"s483",
|
| 521 |
+
"s484",
|
| 522 |
+
"s485",
|
| 523 |
+
"s486",
|
| 524 |
+
"s487",
|
| 525 |
+
"s488",
|
| 526 |
+
"s489",
|
| 527 |
+
"s490",
|
| 528 |
+
"s491",
|
| 529 |
+
"s492",
|
| 530 |
+
"s493",
|
| 531 |
+
"s494",
|
| 532 |
+
"s495",
|
| 533 |
+
"s496",
|
| 534 |
+
"s497",
|
| 535 |
+
"s498",
|
| 536 |
+
"s499",
|
| 537 |
+
"s500",
|
| 538 |
+
"s501",
|
| 539 |
+
"s502",
|
| 540 |
+
"s503",
|
| 541 |
+
"s504",
|
| 542 |
+
"s505",
|
| 543 |
+
"s506",
|
| 544 |
+
"s507",
|
| 545 |
+
"s508",
|
| 546 |
+
"s509",
|
| 547 |
+
"s510",
|
| 548 |
+
"s511",
|
| 549 |
+
"s512",
|
| 550 |
+
"s513",
|
| 551 |
+
"s514",
|
| 552 |
+
"s515",
|
| 553 |
+
"s516",
|
| 554 |
+
"s517",
|
| 555 |
+
"s518",
|
| 556 |
+
"s519",
|
| 557 |
+
"s520",
|
| 558 |
+
"s521",
|
| 559 |
+
"s522",
|
| 560 |
+
"s523",
|
| 561 |
+
"s524",
|
| 562 |
+
"s525",
|
| 563 |
+
"s526",
|
| 564 |
+
"s527",
|
| 565 |
+
"s528",
|
| 566 |
+
"s529",
|
| 567 |
+
"s530",
|
| 568 |
+
"s531",
|
| 569 |
+
"s532",
|
| 570 |
+
"s533",
|
| 571 |
+
"s534",
|
| 572 |
+
"s535",
|
| 573 |
+
"s536",
|
| 574 |
+
"s537",
|
| 575 |
+
"s538",
|
| 576 |
+
"s539",
|
| 577 |
+
"s540",
|
| 578 |
+
"s541",
|
| 579 |
+
"s542",
|
| 580 |
+
"s543",
|
| 581 |
+
"s544",
|
| 582 |
+
"s545",
|
| 583 |
+
"s546",
|
| 584 |
+
"s547",
|
| 585 |
+
"s548",
|
| 586 |
+
"s549",
|
| 587 |
+
"s550",
|
| 588 |
+
"s551",
|
| 589 |
+
"s552",
|
| 590 |
+
"s553",
|
| 591 |
+
"s554",
|
| 592 |
+
"s555",
|
| 593 |
+
"s556",
|
| 594 |
+
"s557",
|
| 595 |
+
"s558",
|
| 596 |
+
"s559",
|
| 597 |
+
"s560",
|
| 598 |
+
"s561",
|
| 599 |
+
"s562",
|
| 600 |
+
"s563",
|
| 601 |
+
"s564",
|
| 602 |
+
"s565",
|
| 603 |
+
"s566",
|
| 604 |
+
"s567",
|
| 605 |
+
"s568",
|
| 606 |
+
"s569",
|
| 607 |
+
"s570",
|
| 608 |
+
"s571",
|
| 609 |
+
"s572",
|
| 610 |
+
"s573",
|
| 611 |
+
"s574",
|
| 612 |
+
"s575",
|
| 613 |
+
"s576",
|
| 614 |
+
"s577",
|
| 615 |
+
"s578",
|
| 616 |
+
"s579",
|
| 617 |
+
"s580",
|
| 618 |
+
"s581",
|
| 619 |
+
"s582",
|
| 620 |
+
"s583",
|
| 621 |
+
"s584",
|
| 622 |
+
"s585",
|
| 623 |
+
"s586",
|
| 624 |
+
"s587",
|
| 625 |
+
"s588",
|
| 626 |
+
"s589",
|
| 627 |
+
"s590",
|
| 628 |
+
"s591",
|
| 629 |
+
"s592",
|
| 630 |
+
"s593",
|
| 631 |
+
"s594",
|
| 632 |
+
"s595",
|
| 633 |
+
"s596",
|
| 634 |
+
"s597",
|
| 635 |
+
"s598",
|
| 636 |
+
"s599",
|
| 637 |
+
"s600",
|
| 638 |
+
"s601",
|
| 639 |
+
"s602",
|
| 640 |
+
"s603",
|
| 641 |
+
"s604",
|
| 642 |
+
"s605",
|
| 643 |
+
"s606",
|
| 644 |
+
"s607",
|
| 645 |
+
"s608",
|
| 646 |
+
"s609",
|
| 647 |
+
"s610",
|
| 648 |
+
"s611",
|
| 649 |
+
"s612",
|
| 650 |
+
"s613",
|
| 651 |
+
"s614",
|
| 652 |
+
"s615",
|
| 653 |
+
"s616",
|
| 654 |
+
"s617",
|
| 655 |
+
"s618",
|
| 656 |
+
"s619",
|
| 657 |
+
"s620",
|
| 658 |
+
"s621",
|
| 659 |
+
"s622",
|
| 660 |
+
"s623",
|
| 661 |
+
"s624",
|
| 662 |
+
"s625",
|
| 663 |
+
"s626",
|
| 664 |
+
"s627",
|
| 665 |
+
"s628",
|
| 666 |
+
"s629",
|
| 667 |
+
"s630",
|
| 668 |
+
"s631",
|
| 669 |
+
"s632",
|
| 670 |
+
"s633",
|
| 671 |
+
"s634",
|
| 672 |
+
"s635",
|
| 673 |
+
"s636",
|
| 674 |
+
"s637",
|
| 675 |
+
"s638",
|
| 676 |
+
"s639",
|
| 677 |
+
"s640",
|
| 678 |
+
"s641",
|
| 679 |
+
"s642",
|
| 680 |
+
"s643",
|
| 681 |
+
"s644",
|
| 682 |
+
"s645",
|
| 683 |
+
"s646",
|
| 684 |
+
"s647",
|
| 685 |
+
"s648",
|
| 686 |
+
"s649",
|
| 687 |
+
"s650",
|
| 688 |
+
"s651",
|
| 689 |
+
"s652",
|
| 690 |
+
"s653",
|
| 691 |
+
"s654",
|
| 692 |
+
"s655",
|
| 693 |
+
"s656",
|
| 694 |
+
"s657",
|
| 695 |
+
"s658",
|
| 696 |
+
"s659",
|
| 697 |
+
"s660",
|
| 698 |
+
"s661",
|
| 699 |
+
"s662",
|
| 700 |
+
"s663",
|
| 701 |
+
"s664",
|
| 702 |
+
"s665",
|
| 703 |
+
"s666",
|
| 704 |
+
"s667",
|
| 705 |
+
"s668",
|
| 706 |
+
"s669",
|
| 707 |
+
"s670",
|
| 708 |
+
"s671",
|
| 709 |
+
"s672",
|
| 710 |
+
"s673",
|
| 711 |
+
"s674",
|
| 712 |
+
"s675",
|
| 713 |
+
"s676",
|
| 714 |
+
"s677",
|
| 715 |
+
"s678",
|
| 716 |
+
"s679",
|
| 717 |
+
"s680",
|
| 718 |
+
"s681",
|
| 719 |
+
"s682",
|
| 720 |
+
"s683",
|
| 721 |
+
"s684",
|
| 722 |
+
"s685",
|
| 723 |
+
"s686",
|
| 724 |
+
"s687",
|
| 725 |
+
"s688",
|
| 726 |
+
"s689",
|
| 727 |
+
"s690",
|
| 728 |
+
"s691",
|
| 729 |
+
"s692",
|
| 730 |
+
"s693",
|
| 731 |
+
"s694",
|
| 732 |
+
"s695",
|
| 733 |
+
"s696",
|
| 734 |
+
"s697",
|
| 735 |
+
"s698",
|
| 736 |
+
"s699",
|
| 737 |
+
"s700",
|
| 738 |
+
"s701",
|
| 739 |
+
"s702",
|
| 740 |
+
"s703",
|
| 741 |
+
"s704",
|
| 742 |
+
"s705",
|
| 743 |
+
"s706",
|
| 744 |
+
"s707",
|
| 745 |
+
"s708",
|
| 746 |
+
"s709",
|
| 747 |
+
"s710",
|
| 748 |
+
"s711",
|
| 749 |
+
"s712",
|
| 750 |
+
"s713",
|
| 751 |
+
"s714",
|
| 752 |
+
"s715",
|
| 753 |
+
"s716",
|
| 754 |
+
"s717",
|
| 755 |
+
"s718",
|
| 756 |
+
"s719",
|
| 757 |
+
"s720",
|
| 758 |
+
"s721",
|
| 759 |
+
"s722",
|
| 760 |
+
"s723",
|
| 761 |
+
"s724",
|
| 762 |
+
"s725",
|
| 763 |
+
"s726",
|
| 764 |
+
"s727",
|
| 765 |
+
"s728",
|
| 766 |
+
"s729",
|
| 767 |
+
"s730",
|
| 768 |
+
"s731",
|
| 769 |
+
"s732",
|
| 770 |
+
"s733",
|
| 771 |
+
"s734",
|
| 772 |
+
"s735",
|
| 773 |
+
"s736",
|
| 774 |
+
"s737",
|
| 775 |
+
"s738",
|
| 776 |
+
"s739",
|
| 777 |
+
"s740",
|
| 778 |
+
"s741",
|
| 779 |
+
"s742",
|
| 780 |
+
"s743",
|
| 781 |
+
"s744",
|
| 782 |
+
"s745",
|
| 783 |
+
"s746",
|
| 784 |
+
"s747",
|
| 785 |
+
"s748",
|
| 786 |
+
"s749",
|
| 787 |
+
"s750",
|
| 788 |
+
"s751",
|
| 789 |
+
"s752",
|
| 790 |
+
"s753",
|
| 791 |
+
"s754",
|
| 792 |
+
"s755",
|
| 793 |
+
"s756",
|
| 794 |
+
"s757",
|
| 795 |
+
"s758",
|
| 796 |
+
"s759",
|
| 797 |
+
"s760",
|
| 798 |
+
"s761",
|
| 799 |
+
"s762",
|
| 800 |
+
"s763",
|
| 801 |
+
"s764",
|
| 802 |
+
"s765",
|
| 803 |
+
"s766",
|
| 804 |
+
"s767",
|
| 805 |
+
"s768",
|
| 806 |
+
"s769",
|
| 807 |
+
"s770",
|
| 808 |
+
"s771",
|
| 809 |
+
"s772",
|
| 810 |
+
"s773",
|
| 811 |
+
"s774",
|
| 812 |
+
"s775",
|
| 813 |
+
"s776",
|
| 814 |
+
"s777",
|
| 815 |
+
"s778",
|
| 816 |
+
"s779",
|
| 817 |
+
"s780",
|
| 818 |
+
"s781",
|
| 819 |
+
"s782",
|
| 820 |
+
"s783",
|
| 821 |
+
"s784",
|
| 822 |
+
"s785",
|
| 823 |
+
"s786",
|
| 824 |
+
"s787",
|
| 825 |
+
"s788",
|
| 826 |
+
"s789",
|
| 827 |
+
"s790",
|
| 828 |
+
"s791",
|
| 829 |
+
"s792",
|
| 830 |
+
"s793",
|
| 831 |
+
"s794",
|
| 832 |
+
"s795",
|
| 833 |
+
"s796",
|
| 834 |
+
"s797",
|
| 835 |
+
"s798",
|
| 836 |
+
"s799",
|
| 837 |
+
"s800",
|
| 838 |
+
"s801",
|
| 839 |
+
"s802",
|
| 840 |
+
"s803",
|
| 841 |
+
"s804",
|
| 842 |
+
"s805",
|
| 843 |
+
"s806",
|
| 844 |
+
"s807",
|
| 845 |
+
"s808",
|
| 846 |
+
"s809",
|
| 847 |
+
"s810",
|
| 848 |
+
"s811",
|
| 849 |
+
"s812",
|
| 850 |
+
"s813",
|
| 851 |
+
"s814",
|
| 852 |
+
"s815",
|
| 853 |
+
"s816",
|
| 854 |
+
"s817",
|
| 855 |
+
"s818",
|
| 856 |
+
"s819",
|
| 857 |
+
"s820",
|
| 858 |
+
"s821",
|
| 859 |
+
"s822",
|
| 860 |
+
"s823",
|
| 861 |
+
"s824",
|
| 862 |
+
"s825",
|
| 863 |
+
"s826",
|
| 864 |
+
"s827",
|
| 865 |
+
"s828",
|
| 866 |
+
"s829",
|
| 867 |
+
"s830",
|
| 868 |
+
"s831",
|
| 869 |
+
"s832",
|
| 870 |
+
"s833",
|
| 871 |
+
"s834",
|
| 872 |
+
"s835",
|
| 873 |
+
"s836",
|
| 874 |
+
"s837",
|
| 875 |
+
"s838",
|
| 876 |
+
"s839",
|
| 877 |
+
"s840",
|
| 878 |
+
"s841",
|
| 879 |
+
"s842",
|
| 880 |
+
"s843",
|
| 881 |
+
"s844",
|
| 882 |
+
"s845",
|
| 883 |
+
"s846",
|
| 884 |
+
"s847",
|
| 885 |
+
"s848",
|
| 886 |
+
"s849",
|
| 887 |
+
"s850",
|
| 888 |
+
"s851",
|
| 889 |
+
"s852",
|
| 890 |
+
"s853",
|
| 891 |
+
"s854",
|
| 892 |
+
"s855",
|
| 893 |
+
"s856",
|
| 894 |
+
"s857",
|
| 895 |
+
"s858",
|
| 896 |
+
"s859",
|
| 897 |
+
"s860",
|
| 898 |
+
"s861",
|
| 899 |
+
"s862",
|
| 900 |
+
"s863",
|
| 901 |
+
"s864",
|
| 902 |
+
"s865",
|
| 903 |
+
"s866",
|
| 904 |
+
"s867",
|
| 905 |
+
"s868",
|
| 906 |
+
"s869",
|
| 907 |
+
"s870",
|
| 908 |
+
"s871",
|
| 909 |
+
"s872",
|
| 910 |
+
"s873",
|
| 911 |
+
"s874",
|
| 912 |
+
"s875",
|
| 913 |
+
"s876",
|
| 914 |
+
"s877",
|
| 915 |
+
"s878",
|
| 916 |
+
"s879",
|
| 917 |
+
"s880",
|
| 918 |
+
"s881",
|
| 919 |
+
"s882",
|
| 920 |
+
"s883",
|
| 921 |
+
"s884",
|
| 922 |
+
"s885",
|
| 923 |
+
"s886",
|
| 924 |
+
"s887",
|
| 925 |
+
"s888",
|
| 926 |
+
"s889",
|
| 927 |
+
"s890",
|
| 928 |
+
"s891",
|
| 929 |
+
"s892",
|
| 930 |
+
"s893",
|
| 931 |
+
"s894",
|
| 932 |
+
"s895",
|
| 933 |
+
"s896",
|
| 934 |
+
"s897",
|
| 935 |
+
"s898",
|
| 936 |
+
"s899",
|
| 937 |
+
"s900",
|
| 938 |
+
"s901",
|
| 939 |
+
"s902",
|
| 940 |
+
"s903",
|
| 941 |
+
"s904",
|
| 942 |
+
"s905",
|
| 943 |
+
"s906",
|
| 944 |
+
"s907",
|
| 945 |
+
"s908",
|
| 946 |
+
"s909",
|
| 947 |
+
"s910",
|
| 948 |
+
"s911",
|
| 949 |
+
"s912",
|
| 950 |
+
"s913",
|
| 951 |
+
"s914",
|
| 952 |
+
"s915",
|
| 953 |
+
"s916",
|
| 954 |
+
"s917",
|
| 955 |
+
"s918",
|
| 956 |
+
"s919",
|
| 957 |
+
"s920",
|
| 958 |
+
"s921",
|
| 959 |
+
"s922",
|
| 960 |
+
"s923",
|
| 961 |
+
"s924",
|
| 962 |
+
"s925",
|
| 963 |
+
"s926",
|
| 964 |
+
"s927",
|
| 965 |
+
"s928",
|
| 966 |
+
"s929",
|
| 967 |
+
"s930",
|
| 968 |
+
"s931",
|
| 969 |
+
"s932",
|
| 970 |
+
"s933",
|
| 971 |
+
"s934",
|
| 972 |
+
"s935",
|
| 973 |
+
"s936",
|
| 974 |
+
"s937",
|
| 975 |
+
"s938",
|
| 976 |
+
"s939",
|
| 977 |
+
"s940",
|
| 978 |
+
"s941",
|
| 979 |
+
"s942",
|
| 980 |
+
"s943",
|
| 981 |
+
"s944",
|
| 982 |
+
"s945",
|
| 983 |
+
"s946",
|
| 984 |
+
"s947",
|
| 985 |
+
"s948",
|
| 986 |
+
"s949",
|
| 987 |
+
"s950",
|
| 988 |
+
"s951",
|
| 989 |
+
"s952",
|
| 990 |
+
"s953",
|
| 991 |
+
"s954",
|
| 992 |
+
"s955",
|
| 993 |
+
"s956",
|
| 994 |
+
"s957",
|
| 995 |
+
"s958",
|
| 996 |
+
"s959",
|
| 997 |
+
"s960",
|
| 998 |
+
"s961",
|
| 999 |
+
"s962",
|
| 1000 |
+
"s963",
|
| 1001 |
+
"s964",
|
| 1002 |
+
"s965",
|
| 1003 |
+
"s966",
|
| 1004 |
+
"s967",
|
| 1005 |
+
"s968",
|
| 1006 |
+
"s969",
|
| 1007 |
+
"s970",
|
| 1008 |
+
"s971",
|
| 1009 |
+
"s972",
|
| 1010 |
+
"s973",
|
| 1011 |
+
"s974",
|
| 1012 |
+
"s975",
|
| 1013 |
+
"s976",
|
| 1014 |
+
"s977",
|
| 1015 |
+
"s978",
|
| 1016 |
+
"s979",
|
| 1017 |
+
"s980",
|
| 1018 |
+
"s981",
|
| 1019 |
+
"s982",
|
| 1020 |
+
"s983",
|
| 1021 |
+
"s984",
|
| 1022 |
+
"s985",
|
| 1023 |
+
"s986",
|
| 1024 |
+
"s987",
|
| 1025 |
+
"s988",
|
| 1026 |
+
"s989",
|
| 1027 |
+
"s990",
|
| 1028 |
+
"s991",
|
| 1029 |
+
"s992",
|
| 1030 |
+
"s993",
|
| 1031 |
+
"s994",
|
| 1032 |
+
"s995",
|
| 1033 |
+
"s996",
|
| 1034 |
+
"s997",
|
| 1035 |
+
"s998",
|
| 1036 |
+
"s999",
|
| 1037 |
+
"<eop>",
|
| 1038 |
+
"<eog>",
|
| 1039 |
+
"<|begin_of_sentence|>",
|
| 1040 |
+
"<|end_of_sentence|>",
|
| 1041 |
+
"<|User|>",
|
| 1042 |
+
"<|Assistant|>",
|
| 1043 |
+
"<think>",
|
| 1044 |
+
"</think>",
|
| 1045 |
+
"<search_result>",
|
| 1046 |
+
"</search_result>",
|
| 1047 |
+
"<search_query>",
|
| 1048 |
+
"</search_query>",
|
| 1049 |
+
"<code_query>",
|
| 1050 |
+
"</code_query>",
|
| 1051 |
+
"<code_result>",
|
| 1052 |
+
"</code_result>",
|
| 1053 |
+
"<infer>",
|
| 1054 |
+
"</infer>",
|
| 1055 |
+
"<inferresult>",
|
| 1056 |
+
"</inferresult>",
|
| 1057 |
+
"<tool_calls>",
|
| 1058 |
+
"</tool_calls>",
|
| 1059 |
+
"<tool_response>",
|
| 1060 |
+
"</tool_response>",
|
| 1061 |
+
"<final_answer>",
|
| 1062 |
+
"</final_answer>"
|
| 1063 |
+
],
|
| 1064 |
+
"bos_token": {
|
| 1065 |
+
"content": "<s>",
|
| 1066 |
+
"lstrip": false,
|
| 1067 |
+
"normalized": false,
|
| 1068 |
+
"rstrip": false,
|
| 1069 |
+
"single_word": false
|
| 1070 |
+
},
|
| 1071 |
+
"eos_token": {
|
| 1072 |
+
"content": "<eod>",
|
| 1073 |
+
"lstrip": false,
|
| 1074 |
+
"normalized": false,
|
| 1075 |
+
"rstrip": false,
|
| 1076 |
+
"single_word": false
|
| 1077 |
+
},
|
| 1078 |
+
"unk_token": {
|
| 1079 |
+
"content": "<unk>",
|
| 1080 |
+
"lstrip": false,
|
| 1081 |
+
"normalized": false,
|
| 1082 |
+
"rstrip": false,
|
| 1083 |
+
"single_word": false
|
| 1084 |
+
}
|
| 1085 |
+
}
|
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c84861a800c30e71099d63dca0963edbacf554586527ac037155a0560e2fb04
|
| 3 |
+
size 14976038
|
tokenizer.model
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:36f79e0c70f73cdd2a8dd0fbe7bfe290da158eea746778d289e4ad76c8b383d9
|
| 3 |
+
size 2155861
|
tokenizer_config.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
utils.py
ADDED
|
@@ -0,0 +1,144 @@
|
| 1 |
+
from typing import (Callable, Dict, Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, Union, overload)
|
| 2 |
+
import torch
|
| 3 |
+
from torch.func import functional_call
|
| 4 |
+
|
| 5 |
+
@overload
|
| 6 |
+
def flatten_bn(x: torch.Tensor) -> torch.Tensor:
|
| 7 |
+
...
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@overload
|
| 11 |
+
def flatten_bn(x: List[torch.Tensor]) -> List[torch.Tensor]:
|
| 12 |
+
...
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@overload
|
| 16 |
+
def flatten_bn(
|
| 17 |
+
x: Union[List[torch.Tensor], torch.Tensor],
|
| 18 |
+
*,
|
| 19 |
+
concat: Literal[True],
|
| 20 |
+
) -> torch.Tensor:
|
| 21 |
+
...
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@overload
|
| 25 |
+
def flatten_bn(
|
| 26 |
+
x: Union[List[torch.Tensor], torch.Tensor],
|
| 27 |
+
*,
|
| 28 |
+
concat: bool = False,
|
| 29 |
+
) -> Union[List[torch.Tensor], torch.Tensor]:
|
| 30 |
+
...
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def flatten_bn(
|
| 34 |
+
x: Union[List[torch.Tensor], torch.Tensor],
|
| 35 |
+
*,
|
| 36 |
+
concat: bool = False,
|
| 37 |
+
) -> Union[List[torch.Tensor], torch.Tensor]:
|
| 38 |
+
"""
|
| 39 |
+
Flatten the ``B`` and ``N`` dimensions of batched multimodal inputs.
|
| 40 |
+
|
| 41 |
+
The input tensor should have shape ``(B, N, ...)``.
|
| 42 |
+
"""
|
| 43 |
+
if isinstance(x, torch.Tensor):
|
| 44 |
+
return x.flatten(0, 1)
|
| 45 |
+
|
| 46 |
+
if concat:
|
| 47 |
+
return torch.cat(x)
|
| 48 |
+
|
| 49 |
+
return [x_n for x_b in x for x_n in x_b]
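A quick check of both flatten_bn paths (the shapes are arbitrary):

import torch
from utils import flatten_bn

x = torch.zeros(2, 3, 4, 5)                          # (B, N, ...) batched multimodal input
print(flatten_bn(x).shape)                           # torch.Size([6, 4, 5])
print(len(flatten_bn([x[0], x[1]])))                 # 6 per-item tensors of shape (4, 5)
print(flatten_bn([x[0], x[1]], concat=True).shape)   # torch.Size([6, 4, 5])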
|
| 50 |
+
|
| 51 |
+
def _flatten_embeddings(embeddings: torch.Tensor) -> torch.Tensor:
|
| 52 |
+
"""
|
| 53 |
+
Recursively flattens and concatenates NestedTensors on all but the last
|
| 54 |
+
dimension.
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
if isinstance(embeddings, torch.Tensor):
|
| 58 |
+
# Flatten all but the last dimension.
|
| 59 |
+
return embeddings.flatten(0, -2)
|
| 60 |
+
|
| 61 |
+
return torch.cat(tuple(_flatten_embeddings(t) for t in embeddings))
|
| 62 |
+
|
| 63 |
+
def _embedding_count_expression(embeddings: torch.Tensor) -> str:
|
| 64 |
+
"""
|
| 65 |
+
Constructs a debugging representation of the number of embeddings in the
|
| 66 |
+
Tensors.
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
if isinstance(embeddings, torch.Tensor):
|
| 70 |
+
return " x ".join([str(dim) for dim in embeddings.shape[:-1]])
|
| 71 |
+
|
| 72 |
+
return " + ".join(
|
| 73 |
+
_embedding_count_expression(inner) for inner in embeddings)
|
| 74 |
+
|
| 75 |
+
def _merge_multimodal_embeddings(
|
| 76 |
+
inputs_embeds: torch.Tensor,
|
| 77 |
+
is_multimodal: torch.Tensor,
|
| 78 |
+
multimodal_embeddings: torch.Tensor,
|
| 79 |
+
) -> torch.Tensor:
|
| 80 |
+
"""
|
| 81 |
+
Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the
|
| 82 |
+
positions in ``inputs_embeds`` corresponding to placeholder tokens in
|
| 83 |
+
``input_ids``.
|
| 84 |
+
|
| 85 |
+
Note:
|
| 86 |
+
This updates ``inputs_embeds`` in place.
|
| 87 |
+
"""
|
| 88 |
+
num_expected_tokens = is_multimodal.sum().item()
|
| 89 |
+
assert isinstance(num_expected_tokens, int)
|
| 90 |
+
# [total_patches, text_config.hidden_size]
|
| 91 |
+
flattened = _flatten_embeddings(multimodal_embeddings)
|
| 92 |
+
if flattened.shape[0] != num_expected_tokens:
|
| 93 |
+
expr = _embedding_count_expression(multimodal_embeddings)
|
| 94 |
+
raise ValueError(
|
| 95 |
+
f"Attempted to assign {expr} = {flattened.shape[0]} "
|
| 96 |
+
f"multimodal tokens to {num_expected_tokens} placeholders")
|
| 97 |
+
|
| 98 |
+
inputs_embeds[is_multimodal] = flattened
|
| 99 |
+
return inputs_embeds
|
| 100 |
+
|
| 101 |
+
def merge_multimodal_embeddings(
|
| 102 |
+
input_ids: torch.Tensor,
|
| 103 |
+
inputs_embeds: torch.Tensor,
|
| 104 |
+
multimodal_embeddings: torch.Tensor,
|
| 105 |
+
placeholder_token_id: Union[int, List[int]],
|
| 106 |
+
) -> torch.Tensor:
|
| 107 |
+
"""
|
| 108 |
+
Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the
|
| 109 |
+
positions in ``inputs_embeds`` corresponding to placeholder tokens in
|
| 110 |
+
``input_ids``.
|
| 111 |
+
|
| 112 |
+
``placeholder_token_id`` can be a list of token ids (e.g., token ids
|
| 113 |
+
of img_start, img_break, and img_end tokens) when needed: This means
|
| 114 |
+
the order of these tokens in the ``input_ids`` MUST MATCH the order of
|
| 115 |
+
their embeddings in ``multimodal_embeddings`` since we need to
|
| 116 |
+
slice-merge instead of individually scattering.
|
| 117 |
+
|
| 118 |
+
For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where
|
| 119 |
+
- T is text token
|
| 120 |
+
- S is image start token
|
| 121 |
+
- I is image embedding token
|
| 122 |
+
- B is image break token
|
| 123 |
+
- E is image end token.
|
| 124 |
+
|
| 125 |
+
Then the image embeddings (that correspond to I's) from vision encoder
|
| 126 |
+
must be padded with embeddings of S, B, and E in the same order of
|
| 127 |
+
input_ids for a correct embedding merge.
|
| 128 |
+
|
| 129 |
+
Note:
|
| 130 |
+
This updates ``inputs_embeds`` in place.
|
| 131 |
+
"""
|
| 132 |
+
if isinstance(placeholder_token_id, list):
|
| 133 |
+
placeholder_token_id = torch.tensor(placeholder_token_id,
|
| 134 |
+
device=input_ids.device)
|
| 135 |
+
return _merge_multimodal_embeddings(
|
| 136 |
+
inputs_embeds,
|
| 137 |
+
torch.isin(input_ids, placeholder_token_id),
|
| 138 |
+
multimodal_embeddings,
|
| 139 |
+
)
|
| 140 |
+
return _merge_multimodal_embeddings(
|
| 141 |
+
inputs_embeds,
|
| 142 |
+
(input_ids == placeholder_token_id),
|
| 143 |
+
multimodal_embeddings,
|
| 144 |
+
)
|
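A minimal end-to-end check of merge_multimodal_embeddings (the placeholder id 1 and the 4-dim hidden size are made up for the demo):

import torch
from utils import merge_multimodal_embeddings

input_ids = torch.tensor([9, 1, 1, 7, 1])            # 1 stands in for the <pad> placeholder id
inputs_embeds = torch.zeros(5, 4)
image_embeds = torch.arange(12, dtype=torch.float32).reshape(1, 3, 4)  # (B, N, hidden)

merged = merge_multimodal_embeddings(input_ids, inputs_embeds, image_embeds, placeholder_token_id=1)
print(merged[1])  # tensor([0., 1., 2., 3.]): the first image embedding fills the first placeholder slot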