Krishna Indukuri committed on
Commit c6617d2 · verified · 1 parent: e20fe64

Upload 23 files

.gitattributes CHANGED
@@ -1,35 +1,36 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,73 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual Environment
24
+ venv/
25
+ env/
26
+ ENV/
27
+ .env
28
+ .venv
29
+ env.bak/
30
+ venv.bak/
31
+
32
+ # IDE
33
+ .idea/
34
+ .vscode/
35
+ *.swp
36
+ *.swo
37
+ .project
38
+ .pydevproject
39
+ .settings/
40
+
41
+ # Jupyter Notebook
42
+ .ipynb_checkpoints
43
+ *.ipynb
44
+
45
+ # Distribution / packaging
46
+ .Python
47
+ *.manifest
48
+ *.spec
49
+
50
+ # Unit test / coverage reports
51
+ htmlcov/
52
+ .tox/
53
+ .coverage
54
+ .coverage.*
55
+ .cache
56
+ nosetests.xml
57
+ coverage.xml
58
+ *.cover
59
+ .hypothesis/
60
+
61
+ # Logs and databases
62
+ *.log
63
+ *.sqlite
64
+ *.db
65
+
66
+ # OS generated files
67
+ .DS_Store
68
+ .DS_Store?
69
+ ._*
70
+ .Spotlight-V100
71
+ .Trashes
72
+ ehthumbs.db
73
+ Thumbs.db
LICENSE ADDED
@@ -0,0 +1,52 @@
1
+ Qwen RESEARCH LICENSE AGREEMENT
2
+
3
+ Qwen RESEARCH LICENSE AGREEMENT Release Date: September 19, 2024
4
+
5
+ By clicking to agree or by using or distributing any portion or element of the Qwen Materials, you will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
6
+
7
+ 1. Definitions
8
+ a. This Qwen RESEARCH LICENSE AGREEMENT (this "Agreement") shall mean the terms and conditions for use, reproduction, distribution and modification of the Materials as defined by this Agreement.
9
+ b. "We" (or "Us") shall mean Alibaba Cloud.
10
+ c. "You" (or "Your") shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Materials for any purpose and in any field of use.
11
+ d. "Third Parties" shall mean individuals or legal entities that are not under common control with us or you.
12
+ e. "Qwen" shall mean the large language models, and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by us.
13
+ f. "Materials" shall mean, collectively, Alibaba Cloud's proprietary Qwen and Documentation (and any portion thereof) made available under this Agreement.
14
+ g. "Source" form shall mean the preferred form for making modifications, including but not limited to model source code, documentation source, and configuration files.
15
+ h. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
16
+ i. "Non-Commercial" shall mean for research or evaluation purposes only.
17
+
18
+ 2. Grant of Rights
19
+ a. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Alibaba Cloud's intellectual property or other rights owned by us embodied in the Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Materials FOR NON-COMMERCIAL PURPOSES ONLY.
20
+ b. If you are commercially using the Materials, you shall request a license from us.
21
+
22
+ 3. Redistribution
23
+ You may distribute copies or make the Materials, or derivative works thereof, available as part of a product or service that contains any of them, with or without modifications, and in Source or Object form, provided that you meet the following conditions:
24
+ a. You shall give any other recipients of the Materials or derivative works a copy of this Agreement;
25
+ b. You shall cause any modified files to carry prominent notices stating that you changed the files;
26
+ c. You shall retain in all copies of the Materials that you distribute the following attribution notices within a "Notice" text file distributed as a part of such copies: "Qwen is licensed under the Qwen RESEARCH LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved."; and
27
+ d. You may add your own copyright statement to your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the work otherwise complies with the terms and conditions of this Agreement.
28
+
29
+ 4. Rules of use
30
+ a. The Materials may be subject to export controls or restrictions in China, the United States or other countries or regions. You shall comply with applicable laws and regulations in your use of the Materials.
31
+ b. If you use the Materials or any outputs or results therefrom to create, train, fine-tune, or improve an AI model that is distributed or made available, you shall prominently display “Built with Qwen” or “Improved using Qwen” in the related product documentation.
32
+
33
+ 5. Intellectual Property
34
+ a. We retain ownership of all intellectual property rights in and to the Materials and derivatives made by or for us. Conditioned upon compliance with the terms and conditions of this Agreement, with respect to any derivative works and modifications of the Materials that are made by you, you are and will be the owner of such derivative works and modifications.
35
+ b. No trademark license is granted to use the trade names, trademarks, service marks, or product names of us, except as required to fulfill notice requirements under this Agreement or as required for reasonable and customary use in describing and redistributing the Materials.
36
+ c. If you commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against us or any entity alleging that the Materials or any output therefrom, or any part of the foregoing, infringe any intellectual property or other right owned or licensable by you, then all licenses granted to you under this Agreement shall terminate as of the date such lawsuit or other proceeding is commenced or brought.
37
+ 6. Disclaimer of Warranty and Limitation of Liability
38
+ a. We are not obligated to support, update, provide training for, or develop any further version of the Qwen Materials or to grant any license thereto.
39
+ b. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. WE MAKE NO WARRANTY AND ASSUME NO RESPONSIBILITY FOR THE SAFETY OR STABILITY OF THE MATERIALS AND ANY OUTPUT THEREFROM.
40
+ c. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MATERIALS OR ANY OUTPUT OF IT, NO MATTER HOW IT’S CAUSED.
41
+ d. You will defend, indemnify and hold harmless us from and against any claim by any third party arising out of or related to your use or distribution of the Materials.
42
+
43
+ 7. Survival and Termination.
44
+ a. The term of this Agreement shall commence upon your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
45
+ b. We may terminate this Agreement if you breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, you must delete and cease use of the Materials. Sections 6 and 8 shall survive the termination of this Agreement.
46
+
47
+ 8. Governing Law and Jurisdiction.
48
+ a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
49
+ b. The People's Courts in Hangzhou City shall have exclusive jurisdiction over any dispute arising out of this Agreement.
50
+ 9. Other Terms and Conditions.
51
+ a. Any arrangements, understandings, or agreements regarding the Material not stated herein are separate from and independent of the terms and conditions of this Agreement. You shall request a separate license from us, if you use the Materials in ways not expressly agreed to in this Agreement.
52
+ b. We shall not be bound by any additional or different terms or conditions communicated by you unless expressly agreed.
README.md ADDED
@@ -0,0 +1,365 @@
1
+ ---
2
+ tags:
3
+ - vidore
4
+ - colpali
5
+ - multimodal-embedding
6
+ - multilingual-embedding
7
+ - Text-to-Visual Document (T→VD) retrieval
8
+ - feature-extraction
9
+ - sentence-similarity
10
+ - mteb
11
+ - sentence-transformers
12
+ language:
13
+ - multilingual
14
+ inference: false
15
+ library_name: transformers
16
+ pipeline_tag: visual-document-retrieval
17
+ ---
18
+ <br><br>
19
+
20
+ <p align="center">
21
+ <img src="https://huggingface.co/datasets/jinaai/documentation-images/resolve/main/logo.webp" alt="Jina AI: Your Search Foundation, Supercharged!" width="150px">
22
+ </p>
23
+
24
+
25
+ <p align="center">
26
+ <b>The embedding model trained by <a href="https://jina.ai/"><b>Jina AI</b></a>.</b>
27
+ </p>
28
+
29
+ # Jina Embeddings v4: Universal Embeddings for Multimodal Multilingual Retrieval
30
+
31
+
32
+ [GGUF](https://github.com/jina-ai/jina-embeddings-v4-gguf) | [Blog](https://jina.ai/news/jina-embeddings-v4-universal-embeddings-for-multimodal-multilingual-retrieval) | [Technical Report](https://arxiv.org/abs/2506.18902) | [API](https://jina.ai/embeddings)
33
+
34
+
35
+ ## Intended Usage & Model Info
36
+ `jina-embeddings-v4` is a universal embedding model for multimodal and multilingual retrieval.
37
+ The model is specially designed for complex document retrieval, including visually rich documents with charts, tables, and illustrations.
38
+
39
+
40
+ Built on [Qwen/Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct), `jina-embeddings-v4` features:
41
+
42
+ - **Unified embeddings** for text, images, and visual documents, supporting both dense (single-vector) and late-interaction (multi-vector) retrieval.
43
+ - **Multilingual support** (30+ languages) and compatibility with a wide range of domains, including technical and visually complex documents.
44
+ - **Task-specific adapters** for retrieval, text matching, and code-related tasks, which can be selected at inference time.
45
+ - **Flexible embedding size**: dense embeddings are 2048 dimensions by default but can be truncated to as low as 128 with minimal performance loss.
46
+
47
+
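Note on Matryoshka truncation: the dense vectors can be shortened by keeping only the leading dimensions and re-normalizing, which matches what the `truncate_dim` argument of the encode functions does internally. A minimal sketch (using a random stand-in vector instead of a real model output) follows:

```python
# Minimal sketch of Matryoshka-style truncation (stand-in vector, not a real embedding).
import torch
import torch.nn.functional as F

full = torch.randn(2048)                      # 2048-d dense embedding (placeholder)
short = F.normalize(full[:128], p=2, dim=-1)  # keep the first 128 dims, re-normalize
```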
48
+ Summary of features:
49
+
50
+ | Feature | Jina Embeddings V4 |
51
+ |------------|------------|
52
+ | Base Model | Qwen2.5-VL-3B-Instruct |
53
+ | Supported Tasks | `retrieval`, `text-matching`, `code` |
54
+ | Model DType | BFloat16 |
55
+ | Max Sequence Length | 32768 |
56
+ | Single-Vector Dimension | 2048 |
57
+ | Multi-Vector Dimension | 128 |
58
+ | Matryoshka dimensions | 128, 256, 512, 1024, 2048 |
59
+ | Pooling Strategy | Mean pooling |
60
+ | Attention Mechanism | FlashAttention2 |
61
+
62
+
63
+
64
+ ## Training & Evaluation
65
+
66
+ Please refer to our [technical report of jina-embeddings-v4](https://arxiv.org/abs/2506.18902) for training details and benchmarks.
67
+
68
+
69
+ ## Usage
70
+
71
+ <details>
72
+ <summary>Requirements</summary>
73
+
74
+ The following Python packages are required:
75
+
76
+ - `transformers>=4.52.0`
77
+ - `torch>=2.6.0`
78
+ - `peft>=0.15.2`
79
+ - `torchvision`
80
+ - `pillow`
81
+
82
+ ### Optional / Recommended
83
+ - **flash-attention**: Installing [flash-attention](https://github.com/Dao-AILab/flash-attention) is recommended for improved inference speed and efficiency, but not mandatory.
84
+ - **sentence-transformers**: If you want to use the model via the `sentence-transformers` interface, install this package as well.
85
+
86
+ </details>
87
+
88
+
89
+ <details>
90
+ <summary>via <a href="https://jina.ai/embeddings/">Jina AI Embeddings API</a></summary>
91
+
92
+
93
+ ```bash
94
+ curl https://api.jina.ai/v1/embeddings \
95
+ -H "Content-Type: application/json" \
96
+ -H "Authorization: Bearer $JINA_AI_API_TOKEN" \
97
+ -d @- <<EOFEOF
98
+ {
99
+ "model": "jina-embeddings-v4",
100
+ "task": "text-matching",
101
+ "input": [
102
+ {
103
+ "text": "غروب جميل على الشاطئ"
104
+ },
105
+ {
106
+ "text": "海滩上美丽的日落"
107
+ },
108
+ {
109
+ "text": "A beautiful sunset over the beach"
110
+ },
111
+ {
112
+ "text": "Un beau coucher de soleil sur la plage"
113
+ },
114
+ {
115
+ "text": "Ein wunderschöner Sonnenuntergang am Strand"
116
+ },
117
+ {
118
+ "text": "Ένα όμορφο ηλιοβασίλεμα πάνω από την παραλία"
119
+ },
120
+ {
121
+ "text": "समुद्र तट पर एक खूबसूरत सूर्यास्त"
122
+ },
123
+ {
124
+ "text": "Un bellissimo tramonto sulla spiaggia"
125
+ },
126
+ {
127
+ "text": "浜辺に沈む美しい夕日"
128
+ },
129
+ {
130
+ "text": "해변 위로 아름다운 일몰"
131
+ },
132
+ {
133
+ "image": "https://i.ibb.co/nQNGqL0/beach1.jpg"
134
+ },
135
+ {
136
+ "image": "https://i.ibb.co/r5w8hG8/beach2.jpg"
137
+ }
138
+ ]
139
+ }
140
+ EOFEOF
141
+ ```
142
+
143
+ </details>
144
+
145
+ <details>
146
+ <summary>via <a href="https://huggingface.co/docs/transformers/en/index">transformers</a></summary>
147
+
148
+ ```python
149
+ # !pip install transformers>=4.52.0 torch>=2.6.0 peft>=0.15.2 torchvision pillow
151
+ from transformers import AutoModel
152
+ import torch
153
+
154
+ # Initialize the model
155
+ model = AutoModel.from_pretrained("jinaai/jina-embeddings-v4", trust_remote_code=True, torch_dtype=torch.float16)
156
+
157
+ model.to("cuda")
158
+
159
+ # ========================
160
+ # 1. Retrieval Task
161
+ # ========================
162
+ # Configure truncate_dim, max_length (for texts), max_pixels (for images), vector_type, batch_size in the encode function if needed
163
+
164
+ # Encode query
165
+ query_embeddings = model.encode_text(
166
+ texts=["Overview of climate change impacts on coastal cities"],
167
+ task="retrieval",
168
+ prompt_name="query",
169
+ )
170
+
171
+ # Encode passage (text)
172
+ passage_embeddings = model.encode_text(
173
+ texts=[
174
+ "Climate change has led to rising sea levels, increased frequency of extreme weather events..."
175
+ ],
176
+ task="retrieval",
177
+ prompt_name="passage",
178
+ )
179
+
180
+ # Encode image/document
181
+ image_embeddings = model.encode_image(
182
+ images=["https://i.ibb.co/nQNGqL0/beach1.jpg"],
183
+ task="retrieval",
184
+ )
185
+
186
+ # ========================
187
+ # 2. Text Matching Task
188
+ # ========================
189
+ texts = [
190
+ "غروب جميل على الشاطئ", # Arabic
191
+ "海滩上美丽的日落", # Chinese
192
+ "Un beau coucher de soleil sur la plage", # French
193
+ "Ein wunderschöner Sonnenuntergang am Strand", # German
194
+ "Ένα όμορφο ηλιοβασίλεμα πάνω από την παραλία", # Greek
195
+ "समुद्र तट पर एक खूबसूरत सूर्यास्त", # Hindi
196
+ "Un bellissimo tramonto sulla spiaggia", # Italian
197
+ "浜辺に沈む美しい夕日", # Japanese
198
+ "해변 위로 아름다운 일몰", # Korean
199
+ ]
200
+
201
+ text_embeddings = model.encode_text(texts=texts, task="text-matching")
202
+
203
+ # ========================
204
+ # 3. Code Understanding Task
205
+ # ========================
206
+
207
+ # Encode query
208
+ query_embedding = model.encode_text(
209
+ texts=["Find a function that prints a greeting message to the console"],
210
+ task="code",
211
+ prompt_name="query",
212
+ )
213
+
214
+ # Encode code
215
+ code_embeddings = model.encode_text(
216
+ texts=["def hello_world():\n print('Hello, World!')"],
217
+ task="code",
218
+ prompt_name="passage",
219
+ )
220
+
221
+ # ========================
222
+ # 4. Use multivectors
223
+ # ========================
224
+
225
+ multivector_embeddings = model.encode_text(
226
+ texts=texts,
227
+ task="retrieval",
228
+ prompt_name="query",
229
+ return_multivector=True,
230
+ )
231
+
232
+ images = ["https://i.ibb.co/nQNGqL0/beach1.jpg", "https://i.ibb.co/r5w8hG8/beach2.jpg"]
233
+ multivector_image_embeddings = model.encode_image(
234
+ images=images,
235
+ task="retrieval",
236
+ return_multivector=True,
237
+ )
238
+ ```
239
+ </details>
240
+
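The multi-vector outputs above (128-dimensional token vectors) are intended for late-interaction scoring in the ColBERT/ColPali style. A small illustrative helper, `maxsim_score` (a name introduced here, not part of the model API), shows how such embeddings are typically compared:

```python
import torch

def maxsim_score(query_vecs: torch.Tensor, doc_vecs: torch.Tensor) -> torch.Tensor:
    # query_vecs: (num_query_tokens, 128), doc_vecs: (num_doc_tokens, 128)
    # For every query token take its best-matching document token, then sum.
    sim = query_vecs @ doc_vecs.T  # (num_query_tokens, num_doc_tokens)
    return sim.max(dim=-1).values.sum()

# e.g. with the outputs of the snippet above:
# score = maxsim_score(multivector_embeddings[0], multivector_image_embeddings[0])
```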
241
+ <details>
242
+ <summary>via <a href="https://sbert.net/">sentence-transformers</a></summary>
243
+
244
+ ```python
245
+ from sentence_transformers import SentenceTransformer
246
+
247
+ # Initialize the model
248
+ model = SentenceTransformer("jinaai/jina-embeddings-v4", trust_remote_code=True)
249
+ # ========================
250
+ # 1. Retrieval Task
251
+ # ========================
252
+ # Encode query
253
+ query_embeddings = model.encode(
254
+ sentences=["Overview of climate change impacts on coastal cities"],
255
+ task="retrieval",
256
+ prompt_name="query",
257
+ )
258
+
259
+ print(f"query_embeddings.shape = {query_embeddings.shape}")
260
+
261
+ # Encode passage (text)
262
+ passage_embeddings = model.encode(
263
+ sentences=[
264
+ "Climate change has led to rising sea levels, increased frequency of extreme weather events..."
265
+ ],
266
+ task="retrieval",
267
+ prompt_name="passage",
268
+ )
269
+
270
+ print(f"passage_embeddings.shape = {passage_embeddings.shape}")
271
+
272
+ # Encode image/document
273
+ image_embeddings = model.encode(
274
+ sentences=["https://i.ibb.co/nQNGqL0/beach1.jpg"],
275
+ task="retrieval",
276
+ )
277
+
278
+ print(f"image_embeddings.shape = {image_embeddings.shape}")
279
+
280
+ # ========================
281
+ # 2. Text Matching Task
282
+ # ========================
283
+ texts = [
284
+ "غروب جميل على الشاطئ", # Arabic
285
+ "海滩上美丽的日落", # Chinese
286
+ "Un beau coucher de soleil sur la plage", # French
287
+ "Ein wunderschöner Sonnenuntergang am Strand", # German
288
+ "Ένα όμορφο ηλιοβασίλεμα πάνω από την παραλία", # Greek
289
+ "समुद्र तट पर एक खूबसूरत सूर्यास्त", # Hindi
290
+ "Un bellissimo tramonto sulla spiaggia", # Italian
291
+ "浜辺に沈む美しい夕日", # Japanese
292
+ "해변 위로 아름다운 일몰", # Korean
293
+ ]
294
+
295
+ text_embeddings = model.encode(sentences=texts, task="text-matching")
296
+
297
+ # ========================
298
+ # 3. Code Understanding Task
299
+ # ========================
300
+
301
+ # Encode query
302
+ query_embeddings = model.encode(
303
+ sentences=["Find a function that prints a greeting message to the console"],
304
+ task="code",
305
+ prompt_name="query",
306
+ )
307
+
308
+ # Encode code
309
+ code_embeddings = model.encode(
310
+ sentences=["def hello_world():\n print('Hello, World!')"],
311
+ task="code",
312
+ prompt_name="passage",
313
+ )
314
+
315
+ # ========================
316
+ # 4. Use multivectors
317
+ # ========================
318
+ # If you want to use multi-vector embeddings, please use the Hugging Face model directly.
319
+ ```
320
+ </details>
321
+
322
+ <details>
323
+ <summary>via <a href="https://github.com/vllm-project/vllm">vLLM</a></summary>
324
+
325
+ We provide separate model versions for each task (`retrieval`, `text-matching`, `code`), in which the task-specific adapter is merged into the base `Qwen2.5-VL` weights.
326
+ This modification enables native compatibility with vLLM.
327
+
328
+ Instructions and usage examples for each task are available in their respective directories:
329
+ - [jina-embeddings-v4-vllm-retrieval](https://huggingface.co/jinaai/jina-embeddings-v4-vllm-retrieval)
330
+ - [jina-embeddings-v4-vllm-text-matching](https://huggingface.co/jinaai/jina-embeddings-v4-vllm-text-matching)
331
+ - [jina-embeddings-v4-vllm-code](https://huggingface.co/jinaai/jina-embeddings-v4-vllm-code)
332
+
333
+ Please refer to the directory that matches your task for more details.
334
+
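For orientation only, a rough sketch of serving one of the merged checkpoints through vLLM's pooling interface is shown below; the model name, `task` flag, and output handling are assumptions that may differ from the officially supported recipe, so follow the per-task directories above for exact instructions.

```python
# Rough sketch (not the official recipe): vLLM pooling/embedding interface.
from vllm import LLM

llm = LLM(model="jinaai/jina-embeddings-v4-vllm-retrieval", task="embed")
outputs = llm.embed(["Query: Overview of climate change impacts on coastal cities"])
vector = outputs[0].outputs.embedding  # attribute layout may vary across vLLM versions
```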
335
+ </details>
336
+
337
+
338
+ ## Jina-VDR
339
+ Alongside `jina-embeddings-v4`, we’re releasing [Jina VDR](https://github.com/jina-ai/jina-vdr), a multilingual, multi-domain benchmark for visual document retrieval. The task collection can be viewed [here](https://huggingface.co/collections/jinaai/jinavdr-visual-document-retrieval-684831c022c53b21c313b449), and evaluation instructions can be found [here](https://github.com/jina-ai/jina-vdr).
340
+
341
+
342
+ ## License
343
+
344
+ This model was initially released under cc-by-nc-4.0 due to an error.
345
+ The correct license is the Qwen Research License, since this model is derived from Qwen2.5-VL-3B, which is governed by that license.
346
+
347
+ ## Contact
348
+
349
+ Join our [Discord community](https://discord.jina.ai) and chat with other community members about ideas.
350
+
351
+
352
+ ## Citation
353
+
354
+ If you find `jina-embeddings-v4` useful in your research, please cite the following paper:
355
+ ```
356
+ @misc{günther2025jinaembeddingsv4universalembeddingsmultimodal,
357
+ title={jina-embeddings-v4: Universal Embeddings for Multimodal Multilingual Retrieval},
358
+ author={Michael Günther and Saba Sturua and Mohammad Kalim Akram and Isabelle Mohr and Andrei Ungureanu and Sedigheh Eslami and Scott Martens and Bo Wang and Nan Wang and Han Xiao},
359
+ year={2025},
360
+ eprint={2506.18902},
361
+ archivePrefix={arXiv},
362
+ primaryClass={cs.AI},
363
+ url={https://arxiv.org/abs/2506.18902},
364
+ }
365
+ ```
added_tokens.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58b54bbe36fc752f79a24a271ef66a0a0830054b4dfad94bde757d851968060b
3
+ size 605
chat_template.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94174d7176c52a7192f96fc34eb2cf23c7c2059d63cdbfadca1586ba89731fb7
3
+ size 1049
config.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bd26fbef1d930d8f968729480fd8ce1dbdd468d5f32079ef3eb0e3e2c59ba24
3
+ size 2751
config_sentence_transformers.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1eee316c1ced66356d6472a0f3e2ff28084e8a693cbb2bb758ed98cc3f20ba22
3
+ size 274
configuration_jina_embeddings_v4.py ADDED
@@ -0,0 +1,23 @@
1
+ from transformers.models.qwen2_5_vl import Qwen2_5_VLConfig
2
+
3
+ from typing import Optional
4
+
5
+
6
+ class JinaEmbeddingsV4Config(Qwen2_5_VLConfig):
7
+ """
8
+ Configuration for the JinaEmbeddingsV4 model.
9
+ """
10
+
11
+ def __init__(
12
+ self,
13
+ single_vector_pool_strategy: str = "mean",
14
+ multi_vector_projector_dim: int = 128,
15
+ pretrained_peft_model_name_or_path: Optional[str] = None,
16
+ verbosity: int = 1,
17
+ **kwargs,
18
+ ):
19
+ super().__init__(**kwargs)
20
+ self.single_vector_pool_strategy = single_vector_pool_strategy
21
+ self.multi_vector_projector_dim = multi_vector_projector_dim
22
+ self.pretrained_peft_model_name_or_path = pretrained_peft_model_name_or_path
23
+ self.verbosity = verbosity
custom_lora_module.py ADDED
@@ -0,0 +1,193 @@
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import warnings
5
+ from typing import Any, Optional, Union, List
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from peft.tuners.lora import LoraLayer
11
+
12
+ class MultiAdapterLinear(nn.Module, LoraLayer):
13
+ """
14
+ Custom LoRA module supporting multiple adapters for a linear layer.
15
+
16
+ This module extends the standard LoRA implementation to support multiple task-specific
17
+ adapters that can be dynamically selected during the forward pass. The task_label
18
+ parameter passed to the forward function determines which LoRA adapter(s) to use:
19
+ - If task_label is a string, all examples in the batch use the same adapter
20
+ - If task_label is a list of strings, each example can use a different adapter
21
+
22
+ This enables efficient multi-task inference where all task-specific LoRA adapters
23
+ are loaded in memory simultaneously and dynamically selected per example, eliminating
24
+ the need to switch adapter states between tasks and allowing optimal throughput
25
+ for mixed-task batches.
26
+
27
+ Derived from peft.tuners.lora.Linear.
28
+ """
29
+ def __init__(
30
+ self,
31
+ base_layer,
32
+ adapter_name: str,
33
+ task_names: List[str],
34
+ r: int = 0,
35
+ lora_alpha: int = 1,
36
+ lora_dropout: float = 0.0,
37
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
38
+ is_target_conv_1d_layer: bool = False,
39
+ init_lora_weights: Union[bool, str] = True,
40
+ use_rslora: bool = False,
41
+ use_dora: bool = False,
42
+ lora_bias: bool = False,
43
+ **kwargs,
44
+ ) -> None:
45
+ super().__init__()
46
+ LoraLayer.__init__(self, base_layer, **kwargs)
47
+
48
+ self.fan_in_fan_out = fan_in_fan_out
49
+ self.task_names = task_names
50
+ self._active_adapter = adapter_name
51
+ self.update_layer(
52
+ adapter_name,
53
+ r,
54
+ lora_alpha=lora_alpha,
55
+ lora_dropout=lora_dropout,
56
+ init_lora_weights=init_lora_weights,
57
+ use_rslora=use_rslora,
58
+ use_dora=use_dora,
59
+ lora_bias=lora_bias,
60
+ )
61
+ self.is_target_conv_1d_layer = is_target_conv_1d_layer
62
+
63
+
64
+ def forward(self, x: torch.Tensor, task_label: Union[str, List[str]], *args: Any, **kwargs: Any) -> torch.Tensor:
65
+ self._check_forward_args(x, *args, **kwargs)
66
+
67
+ if self.disable_adapters:
68
+ if self.merged:
69
+ self.unmerge()
70
+ result = self.base_layer(x, *args, **kwargs)
71
+ elif self.merged:
72
+ result = self.base_layer(x, *args, **kwargs)
73
+ else:
74
+ result = self.base_layer(x, *args, **kwargs)
75
+ torch_result_dtype = result.dtype
76
+
77
+ lora_A_keys = self.lora_A.keys()
78
+ for active_adapter in self.active_adapters:
79
+ if active_adapter not in lora_A_keys:
80
+ continue
81
+
82
+ if isinstance(task_label, str):
83
+ lora_A = self.lora_A[active_adapter][task_label]
84
+ lora_B = self.lora_B[active_adapter][task_label]
85
+ dropout = self.lora_dropout[active_adapter]
86
+ scaling = self.scaling[active_adapter]
87
+ x = self._cast_input_dtype(x, lora_A.weight.dtype)
88
+ result = result + lora_B(lora_A(dropout(x))) * scaling
89
+ else:
90
+ unique_tasks = list(set(task_label))
91
+ lora_output = torch.zeros_like(result)
92
+
93
+ for task in unique_tasks:
94
+ task_indices = [i for i, t in enumerate(task_label) if t == task]
95
+ task_x = x[task_indices]
96
+
97
+ lora_A = self.lora_A[active_adapter][task]
98
+ lora_B = self.lora_B[active_adapter][task]
99
+ dropout = self.lora_dropout[active_adapter]
100
+ scaling = self.scaling[active_adapter]
101
+
102
+ task_x = self._cast_input_dtype(task_x, lora_A.weight.dtype)
103
+ task_lora_value = lora_B(lora_A(dropout(task_x))) * scaling
104
+
105
+ for i, idx in enumerate(task_indices):
106
+ lora_output[idx] = task_lora_value[i]
107
+
108
+ result = result + lora_output
109
+
110
+ result = result.to(torch_result_dtype)
111
+
112
+ return result
113
+
114
+ def __repr__(self) -> str:
115
+ rep = super().__repr__()
116
+ return "lora." + rep
117
+
118
+
119
+ def update_layer(
120
+ self,
121
+ adapter_name,
122
+ r,
123
+ lora_alpha,
124
+ lora_dropout,
125
+ init_lora_weights,
126
+ use_rslora,
127
+ use_dora: bool = False,
128
+ lora_bias: bool = False,
129
+ ):
130
+ # This code works for linear layers, override for other layer types
131
+ if r <= 0:
132
+ raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
133
+
134
+ self.r[adapter_name] = r
135
+ self.lora_alpha[adapter_name] = lora_alpha
136
+ if lora_dropout > 0.0:
137
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
138
+ else:
139
+ lora_dropout_layer = nn.Identity()
140
+
141
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
142
+ # Actual trainable parameters
143
+ self.lora_A[adapter_name] = nn.ModuleDict({
144
+ task_name: nn.Linear(self.in_features, r, bias=False)
145
+ for task_name in self.task_names
146
+ })
147
+ self.lora_B[adapter_name] = nn.ModuleDict({
148
+ task_name: nn.Linear(r, self.out_features, bias=lora_bias)
149
+ for task_name in self.task_names
150
+ })
151
+ self.lora_bias[adapter_name] = lora_bias
152
+
153
+ if use_rslora:
154
+ self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
155
+ else:
156
+ self.scaling[adapter_name] = lora_alpha / r
157
+
158
+ self.reset_lora_parameters(adapter_name, init_lora_weights)
159
+ self._move_adapter_to_device_of_base_layer(adapter_name)
160
+ self.use_dora[adapter_name] = False
161
+ self.set_adapter(self.active_adapters)
162
+
163
+ def reset_lora_parameters(self, adapter_name, init_lora_weights):
164
+ if init_lora_weights is False:
165
+ return
166
+ if init_lora_weights is True:
167
+ # initialize A the same way as the default for nn.Linear and B to zero
168
+ # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124
169
+ for task_name in self.task_names:
170
+ nn.init.kaiming_uniform_(self.lora_A[adapter_name][task_name].weight, a=math.sqrt(5))
171
+ elif init_lora_weights.lower() == "gaussian":
172
+ for task_name in self.task_names:
173
+ nn.init.normal_(self.lora_A[adapter_name][task_name].weight, std=1 / self.r[adapter_name])
174
+ else:
175
+ raise ValueError(f"Unknown initialization {init_lora_weights=}")
176
+ for task_name in self.task_names:
177
+ nn.init.zeros_(self.lora_B[adapter_name][task_name].weight)
178
+ if self.lora_bias[adapter_name]:
179
+ for task_name in self.task_names:
180
+ nn.init.zeros_(self.lora_B[adapter_name][task_name].bias)
181
+
182
+
183
+ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
184
+ """
185
+ Merge the active adapter weights into the base weights
186
+ """
187
+ raise NotImplementedError("Merge operation is not supported")
188
+
189
+ def unmerge(self) -> None:
190
+ """
191
+ This method unmerges all merged adapter layers from the base weights.
192
+ """
193
+ raise NotImplementedError("Unmerge operation is not supported")
custom_st.py ADDED
@@ -0,0 +1,185 @@
1
+ import json
2
+ import os
3
+ from io import BytesIO
4
+ from pathlib import Path
5
+ from typing import Any, Dict, List, Literal, Optional, Union
6
+
7
+ import requests
8
+ import torch
9
+ from PIL import Image
10
+ from torch import nn
11
+ from transformers import AutoConfig, AutoModel, AutoProcessor
12
+
13
+
14
+ class Transformer(nn.Module):
15
+
16
+ save_in_root: bool = True
17
+
18
+ def __init__(
19
+ self,
20
+ model_name_or_path: str = "jinaai/jina-embeddings-v4",
21
+ max_seq_length: Optional[int] = None,
22
+ config_args: Optional[Dict[str, Any]] = None,
23
+ model_args: Optional[Dict[str, Any]] = None,
24
+ tokenizer_args: Optional[Dict[str, Any]] = None,
25
+ cache_dir: Optional[str] = None,
26
+ backend: Literal["torch", "onnx", "openvino"] = "torch",
27
+ **kwargs,
28
+ ) -> None:
29
+ super(Transformer, self).__init__()
30
+ if backend != "torch":
31
+ raise ValueError(
32
+ f"Backend '{backend}' is not supported, please use 'torch' instead"
33
+ )
34
+ config_kwargs = config_args or {}
35
+ model_kwargs = model_args or {}
36
+ tokenizer_kwargs = tokenizer_args or {}
37
+
38
+ self.config = AutoConfig.from_pretrained(
39
+ model_name_or_path, cache_dir=cache_dir, **config_kwargs
40
+ )
41
+ self.default_task = model_kwargs.pop("default_task", None)
42
+ if self.default_task and self.default_task not in self.config.task_names:
43
+ raise ValueError(
44
+ f"Invalid task: {self.default_task}. Must be one of {self.config.task_names}."
45
+ )
46
+
47
+ self.model = AutoModel.from_pretrained(
48
+ model_name_or_path, config=self.config, cache_dir=cache_dir, **model_kwargs
49
+ )
50
+ self.processor = AutoProcessor.from_pretrained(
51
+ model_name_or_path,
52
+ cache_dir=cache_dir,
53
+ use_fast=True,
54
+ **tokenizer_kwargs,
55
+ )
56
+ self.max_seq_length = max_seq_length or 8192
57
+
58
+ def tokenize(
59
+ self, texts: List[Union[str, Image.Image]], padding: Union[str, bool] = True
60
+ ) -> Dict[str, torch.Tensor]:
61
+ encoding = {}
62
+ text_indices = []
63
+ image_indices = []
64
+ for i, text in enumerate(texts):
65
+ if isinstance(text, str):
66
+ # Remove Query: or Passage: prefixes when checking for URLs or file paths
67
+ clean_text = text
68
+ if text.startswith("Query: "):
69
+ clean_text = text[len("Query: ") :]
70
+ elif text.startswith("Passage: "):
71
+ clean_text = text[len("Passage: ") :]
72
+
73
+ if clean_text.startswith("http"):
74
+ response = requests.get(clean_text)
75
+ texts[i] = Image.open(BytesIO(response.content)).convert("RGB")
76
+ image_indices.append(i)
77
+ else:
78
+ try:
79
+ if Path(clean_text).is_file():
80
+ texts[i] = Image.open(clean_text).convert("RGB")
81
+ image_indices.append(i)
82
+ else:
83
+ text_indices.append(i)
84
+ except Exception:  # not a readable local file path; treat as text
85
+ text_indices.append(i)
86
+ elif isinstance(text, Image.Image):
87
+ image_indices.append(i)
88
+ else:
89
+ raise ValueError(f"Invalid input type: {type(text)}")
90
+ if text_indices:
91
+ _texts = [texts[i] for i in text_indices]
92
+ text_features = self.processor.process_texts(
93
+ _texts, max_length=self.max_seq_length
94
+ )
95
+ for key, value in text_features.items():
96
+ encoding[f"text_{key}"] = value
97
+ encoding["text_indices"] = text_indices
98
+
99
+ if image_indices:
100
+ _images = [texts[i] for i in image_indices]
101
+ img_features = self.processor.process_images(_images)
102
+ for key, value in img_features.items():
103
+ encoding[f"image_{key}"] = value
104
+ encoding["image_indices"] = image_indices
105
+
106
+ return encoding
107
+
108
+ def forward(
109
+ self,
110
+ features: Dict[str, torch.Tensor],
111
+ task: Optional[str] = None,
112
+ truncate_dim: Optional[int] = None,
113
+ ) -> Dict[str, torch.Tensor]:
114
+ self.model.eval()
115
+
116
+ if task is None:
117
+ if self.default_task is None:
118
+ raise ValueError(
119
+ "Task must be specified before encoding data. You can set it either during "
120
+ "loading the model (e.g., model_kwargs={'default_task': 'retrieval'}) or "
121
+ "pass it as an argument to the encode method (e.g., model.encode(texts, task='retrieval'))."
122
+ )
123
+ task = self.default_task
124
+ else:
125
+ if task not in self.config.task_names:
126
+ raise ValueError(
127
+ f"Invalid task: {task}. Must be one of {self.config.task_names}."
128
+ )
129
+
130
+ device = self.model.device.type
131
+ all_embeddings = []
132
+
133
+ with torch.no_grad():
134
+ if any(k.startswith("text_") for k in features.keys()):
135
+ text_batch = {
136
+ k[len("text_") :]: v.to(device)
137
+ for k, v in features.items()
138
+ if k.startswith("text_") and k != "text_indices"
139
+ }
140
+ text_indices = features.get("text_indices", [])
141
+ with torch.autocast(device_type=device, dtype=torch.bfloat16):
142
+ text_embeddings = self.model(
143
+ **text_batch, task_label=task
144
+ ).single_vec_emb
145
+ if truncate_dim:
146
+ text_embeddings = text_embeddings[:, :truncate_dim]
147
+ text_embeddings = torch.nn.functional.normalize(
148
+ text_embeddings, p=2, dim=-1
149
+ )
150
+ for i, embedding in enumerate(text_embeddings):
151
+ all_embeddings.append((text_indices[i], embedding))
152
+
153
+ if any(k.startswith("image_") for k in features.keys()):
154
+ image_batch = {
155
+ k[len("image_") :]: v.to(device)
156
+ for k, v in features.items()
157
+ if k.startswith("image_") and k != "image_indices"
158
+ }
159
+ image_indices = features.get("image_indices", [])
160
+
161
+ with torch.autocast(device_type=device, dtype=torch.bfloat16):
162
+ img_embeddings = self.model(
163
+ **image_batch, task_label=task
164
+ ).single_vec_emb
165
+ if truncate_dim:
166
+ img_embeddings = img_embeddings[:, :truncate_dim]
167
+ img_embeddings = torch.nn.functional.normalize(
168
+ img_embeddings, p=2, dim=-1
169
+ )
170
+
171
+ for i, embedding in enumerate(img_embeddings):
172
+ all_embeddings.append((image_indices[i], embedding))
173
+
174
+ if not all_embeddings:
175
+ raise RuntimeError("No embeddings were generated")
176
+
177
+ all_embeddings.sort(key=lambda x: x[0]) # sort by original index
178
+ combined_embeddings = torch.stack([emb for _, emb in all_embeddings])
179
+ features["sentence_embedding"] = combined_embeddings
180
+
181
+ return features
182
+
183
+ @classmethod
184
+ def load(cls, input_path: str) -> "Transformer":
185
+ return cls(model_name_or_path=input_path)
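As the error message in `forward` above indicates, the task can also be fixed once at load time via `model_kwargs={'default_task': ...}`, so that later `encode` calls need not pass it; a brief sketch of that usage:

```python
# Sketch: fixing the task at load time so encode() does not need task=...
from sentence_transformers import SentenceTransformer

model = SentenceTransformer(
    "jinaai/jina-embeddings-v4",
    trust_remote_code=True,
    model_kwargs={"default_task": "retrieval"},
)
embeddings = model.encode(
    ["Overview of climate change impacts on coastal cities"],
    prompt_name="query",
)
```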
generation_config.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4be8e7b43a811255a415e39b11c9b78a3a267120e20dd198774b1a14dcc5ea86
3
+ size 126
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:360eb251d21193fe4a424dcc2f65ee1e9ca66dcbcb497b8141722f9f6ed56f8a
3
+ size 65592
modeling_jina_embeddings_v4.py ADDED
@@ -0,0 +1,609 @@
1
+ # Jina Embeddings V4 Model implementation was inspired by the ColPali codebase:
2
+ # https://github.com/illuin-tech/colpali
3
+
4
+ import os
5
+ from dataclasses import dataclass
6
+ from enum import Enum
7
+ from functools import partial
8
+ from io import BytesIO
9
+ from typing import Any, Callable, ClassVar, Dict, List, Optional, Union, cast
10
+
11
+ import numpy as np
12
+ import requests
13
+ import torch
14
+ from huggingface_hub import snapshot_download
15
+ from peft import LoraConfig, PeftModel
16
+ from PIL import Image
17
+ from torch import nn
18
+ from torch.utils.data import DataLoader
19
+ from tqdm import tqdm
20
+ from transformers import BatchFeature
21
+ from transformers.utils import is_flash_attn_2_available
22
+
23
+ from .configuration_jina_embeddings_v4 import JinaEmbeddingsV4Config
24
+ from .custom_lora_module import MultiAdapterLinear
25
+ from .qwen2_5_vl import Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLProcessor
26
+
27
+
28
+ class PromptType(str, Enum):
29
+ query = "query"
30
+ passage = "passage"
31
+
32
+
33
+ PREFIX_DICT = {"query": "Query", "passage": "Passage"}
34
+
35
+
36
+ class JinaEmbeddingsV4Processor(Qwen2_5_VLProcessor):
37
+ def __init__(self, *args, **kwargs) -> None:
38
+ Qwen2_5_VLProcessor.__init__(self, *args, **kwargs)
39
+ self.assistant_prefix_len = 58
40
+ self.text_max_length = 32768
41
+
42
+ def process_images(
43
+ self,
44
+ images: Union[List[Image.Image], List[List[Image.Image]]],
45
+ ) -> BatchFeature:
46
+
47
+ if isinstance(images[0], list):
48
+ images = cast(List[List[Image.Image]], images)
49
+ text_doc = []
50
+ for i in range(len(images)):
51
+ conversation = [
52
+ {"role": "user", "content": [{"type": "image"}] * len(images[i])}
53
+ ]
54
+ template = self.apply_chat_template(
55
+ conversation, add_generation_prompt=False
56
+ )
57
+ text_doc.append(template[self.assistant_prefix_len :])
58
+
59
+ else:
60
+ images = cast(List[Image.Image], images)
61
+ text_doc = [
62
+ "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|>\n"
63
+ ] * len(images)
64
+
65
+ # The following code is a hack to make sure the scatter in DDP is done correctly when training on multiple GPUs
66
+ batch_doc = self(text=text_doc, images=images, padding="longest", return_tensors="pt") # type: ignore
67
+ # Separate pixel_values for each image
68
+ offsets = batch_doc["image_grid_thw"][:, 1] * batch_doc["image_grid_thw"][:, 2]
69
+ # Pad pixel_values to the same length to be able to make it into a tensor
70
+ pixel_values = torch.split(batch_doc["pixel_values"], offsets.tolist())
71
+
72
+ max_length = max([len(pv) for pv in pixel_values])
73
+
74
+ pixel_values = [
75
+ torch.cat(
76
+ [
77
+ pv,
78
+ torch.zeros(
79
+ (max_length - len(pv), pv.shape[1]),
80
+ dtype=pv.dtype,
81
+ device=pv.device,
82
+ ),
83
+ ]
84
+ )
85
+ for pv in pixel_values
86
+ ]
87
+
88
+ batch_doc["pixel_values"] = torch.stack(pixel_values)
89
+ return batch_doc
90
+
91
+ def process_texts(
92
+ self,
93
+ texts: List[str],
94
+ max_length: Optional[int] = None,
95
+ prefix: Optional[str] = None,
96
+ padding: Optional[str] = None,
97
+ ) -> BatchFeature:
98
+
99
+ max_length = (
100
+ self.text_max_length
101
+ if max_length is None
102
+ else min(max_length, self.text_max_length)
103
+ )
104
+ padded_texts: List[str] = []
105
+
106
+ for text in texts:
107
+ if prefix:
108
+ text = f"{prefix}: {text}"
109
+ padded_texts.append(text)
110
+
111
+ text_batch = self(
112
+ text=padded_texts,
113
+ return_tensors="pt",
114
+ padding=padding or "longest",
115
+ max_length=max_length,
116
+ truncation=True,
117
+ )
118
+
119
+ return text_batch
120
+
121
+
122
+ @dataclass
123
+ class JinaEmbeddingsV4ModelOutput:
124
+ """
125
+ Base class for the Hybrid Model outputs.
126
+ Args:
127
+ vlm_last_hidden_states (torch.Tensor, optional): Last hidden states of the VLM.
128
+ single_vec_emb (torch.Tensor, optional): Single-vector embeddings.
129
+ multi_vec_emb (torch.Tensor, optional): Multi-vector embeddings.
130
+ """
131
+
132
+ vlm_last_hidden_states: Optional[torch.Tensor] = None
133
+ single_vec_emb: Optional[torch.Tensor] = None
134
+ multi_vec_emb: Optional[torch.Tensor] = None
135
+
136
+
137
+ class JinaEmbeddingsV4Model(Qwen2_5_VLForConditionalGeneration):
138
+ config_class = JinaEmbeddingsV4Config
139
+ main_input_name: ClassVar[str] = "doc_input_ids"
140
+
141
+ def __init__(self, config: JinaEmbeddingsV4Config):
142
+ Qwen2_5_VLForConditionalGeneration.__init__(self, config)
143
+ self._init_projection_layer(config)
144
+ self.post_init()
145
+ self.processor = JinaEmbeddingsV4Processor.from_pretrained(
146
+ self.name_or_path, trust_remote_code=True, use_fast=True
147
+ )
148
+ self.multi_vector_projector_dim = config.multi_vector_projector_dim
149
+ self.verbosity = config.verbosity
150
+ self._task = None
151
+
152
+ @property
153
+ def task(self) -> Optional[str]:
154
+ """Get the current task set for the model."""
155
+ return self._task
156
+
157
+ @task.setter
158
+ def task(self, task: str):
159
+ """
160
+ Set the task for the model.
161
+
162
+ Args:
163
+ task (str): The task name. Must be one of ['retrieval', 'text-matching', 'code']
164
+ """
165
+ if task not in self.config.task_names:
166
+ raise ValueError(
167
+ f"Invalid task: {task}. Must be one of {self.config.task_names}."
168
+ )
169
+ self._task = task
170
+
171
+ def get_last_hidden_states(
172
+ self,
173
+ task_label: Union[str, List[str]],
174
+ input_ids: torch.LongTensor,
175
+ attention_mask: torch.Tensor,
176
+ **kwargs,
177
+ ) -> torch.Tensor:
178
+ if "pixel_values" in kwargs:
179
+ offsets = kwargs["image_grid_thw"][:, 1] * kwargs["image_grid_thw"][:, 2]
180
+ kwargs["pixel_values"] = torch.cat(
181
+ [pv[:o] for pv, o in zip(kwargs["pixel_values"], offsets)], dim=0
182
+ )
183
+ position_ids, rope_deltas = self.model.get_rope_index(
184
+ input_ids=input_ids,
185
+ image_grid_thw=kwargs.get("image_grid_thw", None),
186
+ attention_mask=attention_mask,
187
+ )
188
+
189
+ kwargs["output_hidden_states"] = True
190
+ outputs = super().forward(
191
+ task_label=task_label,
192
+ input_ids=input_ids,
193
+ attention_mask=attention_mask,
194
+ **kwargs,
195
+ position_ids=position_ids,
196
+ rope_deltas=rope_deltas,
197
+ use_cache=False,
198
+ )
199
+
200
+ hidden_states = outputs.hidden_states
201
+ if not hidden_states:
202
+ raise ValueError("Hidden states not found in model output")
203
+
204
+ return hidden_states[-1]
205
+
206
+ def _init_projection_layer(self, config) -> None:
207
+ """
208
+ Initializes projection layers.
209
+ """
210
+ self.config.multi_vector_projector_dim = config.multi_vector_projector_dim
211
+
212
+ self.multi_vector_projector = nn.Linear(
213
+ in_features=self.config.text_config.hidden_size,
214
+ out_features=self.config.multi_vector_projector_dim,
215
+ )
216
+
217
+ def get_single_vector_embeddings(
218
+ self,
219
+ hidden_states: torch.Tensor,
220
+ attention_mask: torch.Tensor,
221
+ input_ids: Optional[torch.LongTensor] = None,
222
+ ) -> torch.Tensor:
223
+ """
224
+ Get the single-vector embeddings from the hidden states.
225
+ """
226
+ if self._input_has_image(input_ids[0]): # got document image
227
+ img_start_positions = torch.where(
228
+ input_ids == self.config.vision_start_token_id
229
+ )[1]
230
+ img_end_positions = torch.where(
231
+ input_ids == self.config.vision_end_token_id
232
+ )[1]
233
+
234
+ batch_size, seq_len = input_ids.shape
235
+ position_indices = torch.arange(seq_len, device=input_ids.device).expand(
236
+ batch_size, -1
237
+ )
238
+ image_mask = (position_indices >= img_start_positions.unsqueeze(1)) & (
239
+ position_indices <= img_end_positions.unsqueeze(1)
240
+ )
241
+
242
+ masked_hidden_states = hidden_states * image_mask.unsqueeze(-1)
243
+ pooled_output = masked_hidden_states.sum(dim=1) / image_mask.sum(
244
+ dim=1, keepdim=True
245
+ )
246
+ else: # got query text
247
+ pooled_output = torch.sum(
248
+ hidden_states * attention_mask.unsqueeze(-1), dim=1
249
+ ) / torch.sum(attention_mask, dim=1, keepdim=True)
250
+
251
+ return torch.nn.functional.normalize(pooled_output, dim=-1)
252
+
253
+ def get_multi_vector_embeddings(
254
+ self,
255
+ task_label: Union[str, List[str]],
256
+ hidden_states: torch.Tensor,
257
+ attention_mask: torch.Tensor,
258
+ ) -> torch.Tensor:
259
+ """
260
+ Project the hidden states to multi-vector embeddings.
261
+ """
262
+ multi_vec_emb = self.multi_vector_projector(
263
+ hidden_states, task_label=task_label
264
+ )
265
+ multi_vec_emb = torch.nn.functional.normalize(multi_vec_emb, dim=-1)
266
+ return multi_vec_emb * attention_mask.unsqueeze(-1)
267
+
268
+ def _input_has_image(self, input_ids):
269
+ return self.config.vision_start_token_id in input_ids
270
+
271
+ def forward(
272
+ self,
273
+ task_label: Union[str, List[str]],
274
+ input_ids: torch.LongTensor,
275
+ attention_mask: torch.Tensor,
276
+ output_vlm_last_hidden_states: bool = False,
277
+ **kwargs,
278
+ ) -> JinaEmbeddingsV4ModelOutput:
279
+ """
280
+ Forward pass through the model. Returns both single-vector and multi-vector embeddings.
281
+ Args:
282
+ input_ids (torch.Tensor): The input tokens tensor.
283
+ attention_mask (torch.Tensor): The attention mask tensor.
284
+ Returns:
285
+ JinaEmbeddingsV4ModelOutput:
286
+ vlm_last_hidden_states (torch.Tensor, optional): Last hidden states of the VLM.
287
+ single_vec_emb (torch.Tensor, optional): Single-vector embeddings.
288
+ multi_vec_emb (torch.Tensor, optional): Multi-vector embeddings.
289
+ """
290
+ # Forward pass through the VLM
291
+ hidden_states = self.get_last_hidden_states(
292
+ input_ids=input_ids,
293
+ attention_mask=attention_mask,
294
+ task_label=task_label,
295
+ **kwargs,
296
+ ) # (batch_size, seq_length, hidden_size)
297
+ # Compute the embeddings
298
+ single_vec_emb = self.get_single_vector_embeddings(
299
+ hidden_states=hidden_states,
300
+ attention_mask=attention_mask,
301
+ input_ids=input_ids,
302
+ )
303
+ multi_vec_emb = self.get_multi_vector_embeddings(
304
+ hidden_states=hidden_states,
305
+ attention_mask=attention_mask,
306
+ task_label=task_label,
307
+ )
308
+
309
+ return JinaEmbeddingsV4ModelOutput(
310
+ vlm_last_hidden_states=(
311
+ hidden_states if output_vlm_last_hidden_states else None
312
+ ),
313
+ single_vec_emb=single_vec_emb,
314
+ multi_vec_emb=multi_vec_emb,
315
+ )
316
+
317
+ def _process_batches(
318
+ self,
319
+ data: List[Union[str, Image.Image]],
320
+ task_label: Union[str, List[str]],
321
+ processor_fn: Callable,
322
+ desc: str,
323
+ return_multivector: bool = False,
324
+ return_numpy: bool = False,
325
+ batch_size: int = 32,
326
+ truncate_dim: Optional[int] = None,
327
+ ) -> Union[np.ndarray, List[torch.Tensor]]:
328
+ dataloader = DataLoader(
329
+ dataset=data,
330
+ batch_size=batch_size,
331
+ shuffle=False,
332
+ collate_fn=processor_fn,
333
+ )
334
+ if return_multivector and len(data) > 1:
335
+ assert (
336
+ not return_numpy
337
+ ), "`return_numpy` is not supported when `return_multivector=True` and more than one data is encoded"
338
+ results = []
339
+ self.eval()
340
+ for batch in tqdm(dataloader, desc=desc, disable=self.verbosity == 0):
341
+ with torch.no_grad():
342
+ batch = {k: v.to(self.device) for k, v in batch.items()}
343
+ with torch.autocast(
344
+ device_type=torch.device(self.device).type, dtype=torch.bfloat16
345
+ ):
346
+ embeddings = self(**batch, task_label=task_label)
347
+ if not return_multivector:
348
+ embeddings = embeddings.single_vec_emb
349
+ if truncate_dim is not None:
350
+ embeddings = embeddings[:, :truncate_dim]
351
+ embeddings = torch.nn.functional.normalize(
352
+ embeddings, p=2, dim=-1
353
+ )
354
+ else:
355
+ embeddings = embeddings.multi_vec_emb
356
+
357
+ if return_multivector and not return_numpy:
358
+ valid_tokens = batch["attention_mask"].bool()
359
+ embeddings = [
360
+ emb[mask] for emb, mask in zip(embeddings, valid_tokens)
361
+ ]
362
+ results.append(embeddings)
363
+ else:
364
+ results.append(
365
+ embeddings.cpu()
366
+ if return_numpy
367
+ else list(torch.unbind(embeddings))
368
+ )
369
+ if return_numpy:
370
+ return np.concatenate([result.numpy() for result in results], axis=0)
371
+ return [item for sublist in results for item in sublist]
372
+
373
+ def _validate_encoding_params(
374
+ self,
375
+ truncate_dim: Optional[int] = None,
376
+ prompt_name: Optional[str] = None,
377
+ ) -> Dict[str, Any]:
378
+ encode_kwargs = {}
379
+ if prompt_name is not None:
380
+ if prompt_name not in PREFIX_DICT:
381
+ raise ValueError(
382
+ f"Invalid prompt_name: {prompt_name}. Must be one of {list(PREFIX_DICT.keys())}."
383
+ )
384
+ else:
385
+ encode_kwargs["prefix"] = (
386
+ PREFIX_DICT[prompt_name]
387
+ if self.task != "text-matching"
388
+ else PREFIX_DICT["query"]
389
+ )
390
+
391
+ truncate_dim = truncate_dim or self.config.truncate_dim
392
+ if truncate_dim is not None and truncate_dim not in self.config.matryoshka_dims:
393
+ raise ValueError(
394
+ f"Invalid truncate_dim: {truncate_dim}. Must be one of {self.config.matryoshka_dims}."
395
+ )
396
+ else:
397
+ encode_kwargs["truncate_dim"] = truncate_dim
398
+
399
+ return encode_kwargs
400
+
401
+ def _validate_task(self, task: Optional[str] = None) -> str:
402
+ if task is None:
403
+ if self.task is None:
404
+ raise ValueError(
405
+ "Task must be specified before encoding data. You can set it either as a model property "
406
+ "(e.g., model.task = 'retrieval') or pass it as an argument to the encode method."
407
+ )
408
+ task = self.task
409
+ else:
410
+ if task not in self.config.task_names:
411
+ raise ValueError(
412
+ f"Invalid task: {task}. Must be one of {self.config.task_names}."
413
+ )
414
+ return task
415
+
416
+ def encode_text(
417
+ self,
418
+ texts: Union[str, List[str]],
419
+ task: Optional[str] = None,
420
+ max_length: int = 32768,
421
+ batch_size: int = 8,
422
+ return_multivector: bool = False,
423
+ return_numpy: bool = False,
424
+ truncate_dim: Optional[int] = None,
425
+ prompt_name: Optional[str] = None,
426
+ ) -> Union[List[torch.Tensor], torch.Tensor]:
427
+ """
428
+ Encodes a list of texts into embeddings.
429
+
430
+ Args:
431
+ texts: text or list of text strings to encode
432
+ max_length: Maximum token length for text processing
433
+ batch_size: Number of texts to process at once
434
+ return_multivector: Whether to return multi-vector embeddings instead of single-vector embeddings
435
+ return_numpy: Whether to return numpy arrays instead of torch tensors
436
+ truncate_dim: Dimension to truncate embeddings to (128, 256, 512, or 1024)
437
+ prompt_name: Type of text being encoded ('query' or 'passage')
438
+
439
+ Returns:
440
+ List of text embeddings as tensors or numpy arrays when encoding multiple texts, or single text embedding as tensor when encoding a single text
441
+ """
442
+ prompt_name = prompt_name or "query"
443
+ encode_kwargs = self._validate_encoding_params(
444
+ truncate_dim=truncate_dim, prompt_name=prompt_name
445
+ )
446
+
447
+ task = self._validate_task(task)
448
+
449
+ processor_fn = partial(
450
+ self.processor.process_texts,
451
+ max_length=max_length,
452
+ prefix=encode_kwargs.pop("prefix"),
453
+ )
454
+
455
+ return_list = isinstance(texts, list)
456
+
457
+ # If return_multivector is True and encoding multiple texts, ignore return_numpy
458
+ if return_multivector and return_list and len(texts) > 1:
459
+ if return_numpy:
460
+ print(
461
+ "Warning: `return_numpy` is ignored when `return_multivector=True` and `len(texts) > 1`"
462
+ )
463
+ return_numpy = False
464
+
465
+ if isinstance(texts, str):
466
+ texts = [texts]
467
+
468
+ embeddings = self._process_batches(
469
+ data=texts,
470
+ processor_fn=processor_fn,
471
+ desc="Encoding texts...",
472
+ task_label=task,
473
+ return_multivector=return_multivector,
474
+ return_numpy=return_numpy,
475
+ batch_size=batch_size,
476
+ **encode_kwargs,
477
+ )
478
+
479
+ return embeddings if return_list else embeddings[0]
480
+
481
+ def _load_images_if_needed(
482
+ self, images: List[Union[str, Image.Image]]
483
+ ) -> List[Image.Image]:
484
+ loaded_images = []
485
+ for image in images:
486
+ if isinstance(image, str):
487
+ if image.startswith("http"):
488
+ response = requests.get(image)
489
+ image = Image.open(BytesIO(response.content)).convert("RGB")
490
+ else:
491
+ image = Image.open(image).convert("RGB")
492
+ loaded_images.append(image)
493
+ return loaded_images
494
+
495
+ def encode_image(
496
+ self,
497
+ images: Union[str, Image.Image, List[Union[str, Image.Image]]],
498
+ task: Optional[str] = None,
499
+ batch_size: int = 8,
500
+ return_multivector: bool = False,
501
+ return_numpy: bool = False,
502
+ truncate_dim: Optional[int] = None,
503
+ max_pixels: Optional[int] = None,
504
+ ) -> Union[List[torch.Tensor], torch.Tensor]:
505
+ """
506
+ Encodes a list of images or a single image into embedding(s).
507
+
508
+ Args:
509
+ images: image(s) to encode, can be PIL Image(s), URL(s), or local file path(s)
510
+ batch_size: Number of images to process at once
511
+ return_multivector: Whether to return multi-vector embeddings instead of single-vector embeddings
512
+ return_numpy: Whether to return numpy arrays instead of torch tensors. If `return_multivector` is `True` and more than one image is encoded, this parameter is ignored.
513
+ truncate_dim: Dimension to truncate embeddings to (128, 256, 512, or 1024)
514
+ max_pixels: Maximum number of pixels to process per image
515
+
516
+ Returns:
517
+ List of image embeddings as tensors or numpy arrays when encoding multiple images, or single image embedding as tensor when encoding a single image
518
+ """
519
+ if max_pixels:
520
+ default_max_pixels = self.processor.image_processor.max_pixels
521
+ self.processor.image_processor.max_pixels = (
522
+ max_pixels # change during encoding
523
+ )
524
+ encode_kwargs = self._validate_encoding_params(truncate_dim=truncate_dim)
525
+ task = self._validate_task(task)
526
+
527
+ return_list = isinstance(images, list)
528
+
529
+ # If return_multivector is True and encoding multiple images, ignore return_numpy
530
+ if return_multivector and return_list and len(images) > 1:
531
+ if return_numpy:
532
+ print(
533
+ "Warning: `return_numpy` is ignored when `return_multivector=True` and `len(images) > 1`"
534
+ )
535
+ return_numpy = False
536
+
537
+ # Convert single image to list
538
+ if isinstance(images, (str, Image.Image)):
539
+ images = [images]
540
+
541
+ images = self._load_images_if_needed(images)
542
+ embeddings = self._process_batches(
543
+ data=images,
544
+ processor_fn=self.processor.process_images,
545
+ desc="Encoding images...",
546
+ task_label=task,
547
+ batch_size=batch_size,
548
+ return_multivector=return_multivector,
549
+ return_numpy=return_numpy,
550
+ **encode_kwargs,
551
+ )
552
+
553
+ if max_pixels:
554
+ self.processor.image_processor.max_pixels = default_max_pixels
555
+
556
+ return embeddings if return_list else embeddings[0]
557
+
558
+ @classmethod
559
+ def from_pretrained(
560
+ cls,
561
+ pretrained_model_name_or_path,
562
+ *args,
563
+ **kwargs,
564
+ ):
565
+ """
566
+ Loads a pretrained model and configures it with the appropriate task adapter (`retrieval` by default).
567
+ """
568
+ if "torch_dtype" not in kwargs:
569
+ kwargs["torch_dtype"] = "auto"
570
+
571
+ kwargs["key_mapping"] = super()._checkpoint_conversion_mapping
572
+ if not is_flash_attn_2_available():
573
+ kwargs["attn_implementation"] = "sdpa"
574
+
575
+ base_model = super().from_pretrained(
576
+ pretrained_model_name_or_path, *args, **kwargs
577
+ )
578
+
579
+ # Configure adapter directory
580
+ if os.path.isdir(base_model.name_or_path):
581
+ adapter_dir = os.path.join(base_model.name_or_path, "adapters")
582
+ else:
583
+ adapter_cache_path = snapshot_download(
584
+ repo_id=base_model.name_or_path, allow_patterns=["adapters/*"]
585
+ )
586
+ adapter_dir = os.path.join(adapter_cache_path, "adapters")
587
+
588
+ lora_config = LoraConfig.from_pretrained(adapter_dir)
589
+ lora_config._custom_modules = {
590
+ torch.nn.modules.linear.Linear: partial(
591
+ MultiAdapterLinear,
592
+ task_names=base_model.config.task_names,
593
+ )
594
+ }
595
+ peft_model = PeftModel.from_pretrained(
596
+ model=base_model,
597
+ model_id=adapter_dir,
598
+ config=lora_config,
599
+ )
600
+
601
+ def task_getter(self):
602
+ return self.model.task
603
+
604
+ def task_setter(self, value):
605
+ self.model.task = value
606
+
607
+ peft_model.__class__.task = property(task_getter, task_setter)
608
+
609
+ return peft_model
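
Taken together, `encode_text`, `encode_image`, and the custom `from_pretrained` above imply a calling pattern along the following lines. This is a minimal usage sketch, not an official snippet from this repository: loading through `AutoModel` with `trust_remote_code=True` and the repo id `jinaai/jina-embeddings-v4` are assumptions that this diff does not confirm.

    from transformers import AutoModel

    # Assumption: the class defined above is exposed via AutoModel + remote code,
    # and "jinaai/jina-embeddings-v4" is the hosting repo (not shown in this diff).
    model = AutoModel.from_pretrained(
        "jinaai/jina-embeddings-v4",
        trust_remote_code=True,
    )
    model.task = "retrieval"  # routed through the `task` property patched onto the PEFT wrapper

    # Single-vector text embedding, truncated to a 128-dim Matryoshka slice and re-normalized
    query_emb = model.encode_text(
        texts=["What is Matryoshka representation learning?"],
        prompt_name="query",
        truncate_dim=128,
    )

    # Multi-vector image embeddings: one (num_tokens, dim) tensor per image, padding removed
    img_embs = model.encode_image(
        images=["https://example.com/figure.png"],  # URL, local path, or PIL.Image
        return_multivector=True,
    )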
modules.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b54acf92eab134d664abbbc5e42fd92f27535d3605666962c15dc3c32b2d9744
+ size 168

preprocessor_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45e4720bcf1eac214b977199a08aae9e50b5797033987c389b46f601cb359a68
+ size 726
qwen2_5_vl.py ADDED
The diff for this file is too large to render. See raw diff
 
results.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:304bc4d3d620c30be991b788fc0e5c3176495a404a6119d11dbc3c298d5ad575
+ size 20325

special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76862e765266b85aa9459767e33cbaf13970f327a0e88d1c65846c2ddd3a1ecd
+ size 613

tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896

tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13d28527663126ad9ab8a34aa6a4028b3f0b25f100defec89ee90b442d368dde
+ size 7306

vocab.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910
+ size 2776833
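
The JSON files added above are committed as Git LFS pointers, so the three `version` / `oid` / `size` lines are what actually lives in the repository; the real payloads are resolved by git-lfs from the sha256 object ids. As a rough illustration only (the helper below is hypothetical, not part of this repo), such a pointer can be parsed like this:

    from pathlib import Path

    def parse_lfs_pointer(path: str) -> dict:
        """Split a Git LFS pointer file into its version/oid/size fields."""
        fields = {}
        for line in Path(path).read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    # On a checkout made without `git lfs pull`, tokenizer_config.json is still a pointer:
    pointer = parse_lfs_pointer("tokenizer_config.json")
    print(pointer["oid"], pointer["size"])  # sha256:13d28527..., 7306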