HexQuant marco committed on
Commit
4ef77d9
·
verified ·
0 Parent(s):

Duplicate from llamaindex/vdr-multilingual-train

Browse files

Co-authored-by: Marco Cimolai <marco@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +59 -0
  2. README.md +213 -0
  3. datapipeline.png +3 -0
  4. de/train-00000-of-00021.parquet +3 -0
  5. de/train-00001-of-00021.parquet +3 -0
  6. de/train-00002-of-00021.parquet +3 -0
  7. de/train-00003-of-00021.parquet +3 -0
  8. de/train-00004-of-00021.parquet +3 -0
  9. de/train-00005-of-00021.parquet +3 -0
  10. de/train-00006-of-00021.parquet +3 -0
  11. de/train-00007-of-00021.parquet +3 -0
  12. de/train-00008-of-00021.parquet +3 -0
  13. de/train-00009-of-00021.parquet +3 -0
  14. de/train-00010-of-00021.parquet +3 -0
  15. de/train-00011-of-00021.parquet +3 -0
  16. de/train-00012-of-00021.parquet +3 -0
  17. de/train-00013-of-00021.parquet +3 -0
  18. de/train-00014-of-00021.parquet +3 -0
  19. de/train-00015-of-00021.parquet +3 -0
  20. de/train-00016-of-00021.parquet +3 -0
  21. de/train-00017-of-00021.parquet +3 -0
  22. de/train-00018-of-00021.parquet +3 -0
  23. de/train-00019-of-00021.parquet +3 -0
  24. de/train-00020-of-00021.parquet +3 -0
  25. en/train-00000-of-00019.parquet +3 -0
  26. en/train-00001-of-00019.parquet +3 -0
  27. en/train-00002-of-00019.parquet +3 -0
  28. en/train-00003-of-00019.parquet +3 -0
  29. en/train-00004-of-00019.parquet +3 -0
  30. en/train-00005-of-00019.parquet +3 -0
  31. en/train-00006-of-00019.parquet +3 -0
  32. en/train-00007-of-00019.parquet +3 -0
  33. en/train-00008-of-00019.parquet +3 -0
  34. en/train-00009-of-00019.parquet +3 -0
  35. en/train-00010-of-00019.parquet +3 -0
  36. en/train-00011-of-00019.parquet +3 -0
  37. en/train-00012-of-00019.parquet +3 -0
  38. en/train-00013-of-00019.parquet +3 -0
  39. en/train-00014-of-00019.parquet +3 -0
  40. en/train-00015-of-00019.parquet +3 -0
  41. en/train-00016-of-00019.parquet +3 -0
  42. en/train-00017-of-00019.parquet +3 -0
  43. en/train-00018-of-00019.parquet +3 -0
  44. es/train-00000-of-00021.parquet +3 -0
  45. es/train-00001-of-00021.parquet +3 -0
  46. es/train-00002-of-00021.parquet +3 -0
  47. es/train-00003-of-00021.parquet +3 -0
  48. es/train-00004-of-00021.parquet +3 -0
  49. es/train-00005-of-00021.parquet +3 -0
  50. es/train-00006-of-00021.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
+ *.gif filter=lfs diff=lfs merge=lfs -text
51
+ *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
+ *.jpg filter=lfs diff=lfs merge=lfs -text
55
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - de
4
+ - it
5
+ - fr
6
+ - es
7
+ - en
8
+ multilinguality:
9
+ - multilingual
10
+ size_categories:
11
+ - 100K<n<1M
12
+ pretty_name: Multilingual Visual Document Retrieval
13
+ dataset_info:
14
+ - config_name: en
15
+ features:
16
+ - name: id
17
+ dtype: string
18
+ - name: query
19
+ dtype: string
20
+ - name: image
21
+ dtype: image
22
+ - name: negatives
23
+ sequence:
24
+ dtype: string
25
+ - name: language
26
+ dtype: string
27
+ splits:
28
+ - name: train
29
+ num_bytes: 19695589638
30
+ num_examples: 94225
31
+ download_size: 19695589638
32
+ dataset_size: 19695589638
33
+ - config_name: es
34
+ features:
35
+ - name: id
36
+ dtype: string
37
+ - name: query
38
+ dtype: string
39
+ - name: image
40
+ dtype: image
41
+ - name: negatives
42
+ sequence:
43
+ dtype: string
44
+ - name: language
45
+ dtype: string
46
+ splits:
47
+ - name: train
48
+ num_bytes: 19881676198
49
+ num_examples: 102685
50
+ download_size: 19881676198
51
+ dataset_size: 19881676198
52
+ - config_name: it
53
+ features:
54
+ - name: id
55
+ dtype: string
56
+ - name: query
57
+ dtype: string
58
+ - name: image
59
+ dtype: image
60
+ - name: negatives
61
+ sequence:
62
+ dtype: string
63
+ - name: language
64
+ dtype: string
65
+ splits:
66
+ - name: train
67
+ num_bytes: 20278641470
68
+ num_examples: 98747
69
+ download_size: 20278641470
70
+ dataset_size: 20278641470
71
+ - config_name: de
72
+ features:
73
+ - name: id
74
+ dtype: string
75
+ - name: query
76
+ dtype: string
77
+ - name: image
78
+ dtype: image
79
+ - name: negatives
80
+ sequence:
81
+ dtype: string
82
+ - name: language
83
+ dtype: string
84
+ splits:
85
+ - name: train
86
+ num_bytes: 19629975126
87
+ num_examples: 100713
88
+ download_size: 19629975126
89
+ dataset_size: 19629975126
90
+ - config_name: fr
91
+ features:
92
+ - name: id
93
+ dtype: string
94
+ - name: query
95
+ dtype: string
96
+ - name: image
97
+ dtype: image
98
+ - name: negatives
99
+ sequence:
100
+ dtype: string
101
+ - name: language
102
+ dtype: string
103
+ splits:
104
+ - name: train
105
+ num_bytes: 20825335207
106
+ num_examples: 99797
107
+ download_size: 20825335207
108
+ dataset_size: 20825335207
109
+ configs:
110
+ - config_name: en
111
+ data_files:
112
+ - split: train
113
+ path: en/train-*
114
+ - config_name: it
115
+ data_files:
116
+ - split: train
117
+ path: it/train-*
118
+ - config_name: fr
119
+ data_files:
120
+ - split: train
121
+ path: fr/train-*
122
+ - config_name: es
123
+ data_files:
124
+ - split: train
125
+ path: es/train-*
126
+ - config_name: de
127
+ data_files:
128
+ - split: train
129
+ path: de/train-*
130
+ license: apache-2.0
131
+ ---
132
+
133
+ # Multilingual Visual Document Retrieval Dataset
134
+
135
+ ![](training_cover.png)
136
+
137
+ > This dataset consists of **500k multilingual query image samples**, collected and generated from scratch using public internet PDFs. The queries are synthetic and generated using VLMs (gemini-1.5-pro and Qwen2-VL-72B).
138
+
139
+ It was used to train the [vdr-2b-multi-v1](https://huggingface.co/llamaindex/vdr-2b-multi-v1) multimodal, multilingual retrieval embedding model.
140
+
141
+ ## How it was created
142
+
143
+ This is the entire data pipeline used to create the Italian subset of this dataset. Each step of the process is explained in detail below.
144
+
145
+ ![](datapipeline.png)
146
+
147
+ #### Data gathering
148
+
149
+ For each language, we generate a long list of search queries covering many different topics, which are then used to search for PDFs. We use the language filtering capabilities of the search engine to scrape documents that are only in the specified language. This "search by topic" technique ensures that the model has seen a lot of diverse topics and domains, and that it performs well in real life scenarios.
150
+
151
+ The scraping process produced ~50k multilingual documents. Contrary to the method used in the previous [`mcdse-2b-v1`](https://huggingface.co/marco/mcdse-2b-v1) model, pages were not extracted randomly. Instead, each page of each PDF was run through a document layout analysis model to determine whether the page contained more textual or visual elements. The result is a number that classifies the page as text-only, visual-only or mixed. This labelling step was then used to sample ~100k pages, ensuring they were evenly distributed by page type.
152
+
153
+ #### Synthetic generation
154
+ The queries were then generated using gemini-1.5-pro and Qwen2-VL-72B. They were tasked to come up with a specific and a general question. Only the specific question is then used to train the model, but forcing the LLM to distinguish between the two often resulted in stronger specific questions for information retrieval training.
155
+
156
+ After generation, a further cleaning step ensures that the questions are good enough for training. This includes:
157
+
158
+ - Ensuring the language is correct
159
+ - Fixing formatting problems
160
+ - Removing markdown
161
+ - Ensuring that only one question is posed
162
+ - Removing grounding phrases (e.g. "according to Figure 1", "this document", ...)
163
+
164
+
165
+ #### Filtering and hard-negative mining
166
+
167
+ This cleaning step ensures that the queries are syntactically correct and follow some strict guidelines. But it still doesn't ensure that the queries are good enough for information retrieval.
168
+
169
+ To filter out bad questions, we have embedded and indexed each broad query with the voyage-3 embedding model. For each specific question, we search the index. The query is marked as 'good' if its associated broad question appears in the top 100 results. This method removes low-entropy, duplicate, or overly similar questions. On average, 40% of queries were removed from each language dataset.
170
+
171
+ Hard negatives were then mined using voyage-3 only on specific questions with a fixed threshold of 0.75. Experiments were also carried out using positive aware negative mining as used by [nvidia/NV-Retriever-v1](https://huggingface.co/nvidia/NV-Retriever-v1), but on this dataset it seems to produce too easy/distant negatives.
172
+
173
+ # Info and usage
174
+
175
+ The training dataset consists of 496,167 PDF pages, of which only 280,679 are associated with the filtered queries (using the method described above). The images that remain without a query are still used as hard negatives.
176
+
177
+ | Language | # filtered queries | # unfiltered queries |
178
+ |----------:|-------------------:|---------------------:|
179
+ | English | 53,512 | 94,225 |
180
+ | Spanish | 58,738 | 102,685 |
181
+ | Italian | 54,942 | 98,747 |
182
+ | German | 58,217 | 100,713 |
183
+ | French | 55,270 | 99,797 |
184
+ | **TOTAL** | **280,679** | **496,167** |
185
+
186
+ ### Schema
187
+
188
+ | **Column** | **Type** |
189
+ |-----------:|--------------:|
190
+ | id | str |
191
+ | query | str |
192
+ | image | image |
193
+ | negatives | array[string] |
194
+ | language | string |
195
+
196
+ The `id` column represents the identification number of the positive image. The `negatives` column contains all the ids of the associated negatives, sorted in ascending order by their distance from the positive.
197
+ The last rows do not contain any negatives or queries, as their queries have been filtered out by the data curation process. The images are still being used as negatives for other earlier queries.
198
+
199
+ The dataset consists of 5 different subsets for each language. You can download languages individually by specifying the language subset in [`load_dataset`](https://huggingface.co/docs/datasets/main/en/package_reference/loading_methods#datasets.load_dataset):
200
+
201
+ ```python
202
+ from datasets import load_dataset
203
+
204
+ italian_dataset = load_dataset("llamaindex/vdr-multilingual-train", "it", split="train")
205
+
206
+ english_dataset = load_dataset("llamaindex/vdr-multilingual-train", "en", split="train")
207
+
208
+ french_dataset = load_dataset("llamaindex/vdr-multilingual-train", "fr", split="train")
209
+
210
+ german_dataset = load_dataset("llamaindex/vdr-multilingual-train", "de", split="train")
211
+
212
+ spanish_dataset = load_dataset("llamaindex/vdr-multilingual-train", "es", split="train")
213
+ ```
datapipeline.png ADDED

Git LFS Details

  • SHA256: cd2c891e102ee8218c0fa1d6a365c87278bfd93453f5bae0a04027797bcca550
  • Pointer size: 131 Bytes
  • Size of remote file: 114 kB
de/train-00000-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ee99da640d936300637ad41e6c51b0dfec7fb042c93775e24459adcaae22f29
3
+ size 950358955
de/train-00001-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9ac9b3ba478a632aeeb6e4409775b79c3e27d173d6dfdd97eea847bef6ac9b4
3
+ size 944740366
de/train-00002-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7337a3c85373326911120558508170426486a698fe25113286dc2985fd9e0f92
3
+ size 962971620
de/train-00003-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0238b795b624c8abfe7e3d123306865d090fc84ceb5dab5e4d44b0ffff39d8c3
3
+ size 957508678
de/train-00004-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4ce2caaaa1b6be638a1132b8e3dfea7fbf8902304fd517d9174ebb03217900e
3
+ size 956612380
de/train-00005-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9623730ccf2f53e022ab23cfe70706ac208f68bb7adc4949ded35ed9789d351e
3
+ size 986946151
de/train-00006-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c749d2f4ffdd7a2f9ab5e92285b26f45e1be6867b728192056dae13f59ab1656
3
+ size 931996945
de/train-00007-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f84d30475465f807d1a4626f60ab97b95749cd1dfa01dc19d61f881719436be1
3
+ size 1011264740
de/train-00008-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf53fa07a94f9424cb4a88b5df4c0182c9432b3d95c78e591a5f4a424980f8d1
3
+ size 941367731
de/train-00009-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c6d8975b3b265bb154c74bbc8422ab2aa143ad0588ff4b62b8d4219befeed0c
3
+ size 937512706
de/train-00010-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:528220f45e9500e55c9659bebf470cde9926665f6bcfad902dd045a7a9345ce4
3
+ size 985784568
de/train-00011-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14727e27c751986e1d0c4cca15510c3000614f769f0bd6c253366d9fe6fcb7ca
3
+ size 1034102579
de/train-00012-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab7bcd40d00374d3c642485bf63d0ff1b9051eeada31e1fc20b7ece7f66f239d
3
+ size 1002393422
de/train-00013-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8069889b5891b2b34df0851ff1acbab77d78d9cf1f453dccf5cb39e1027698d
3
+ size 971849945
de/train-00014-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c2d52e649121b46fec66e4e59afd4f8b35dc1a1cc3b58a1c96b2f45d95e7942
3
+ size 978281621
de/train-00015-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ef52b21c55b0aa21a8755fe11b9c306f50937d798b02e7de204191b5da64700
3
+ size 963005294
de/train-00016-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d3ed696afd25ede62ac9e108928d27b7db2b7a6d9ba3188dce77b0a62522bf7
3
+ size 986361540
de/train-00017-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad411016005af2f4ef7fbf048fee34fd89f9ac37de002aa59e6c0205654ab051
3
+ size 1020408533
de/train-00018-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:762f1e116322c14e78d5069b554cfd8101aac91a81f2fd047c2b66bae688d38e
3
+ size 1006955780
de/train-00019-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f937fce2e9f98a01c12063908c8c3f2d4f1f7322d89e92eac5fdfe14688428b4
3
+ size 957400597
de/train-00020-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b3433fa1c27a562708521b025d7a038ab47aa81ba699be6f8d309d6a04a5aa7
3
+ size 142150975
en/train-00000-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6611f392311a9ef9d1f5c36490d45dfab042b5d200033b4e02bdb72a16d14e3f
3
+ size 1000602176
en/train-00001-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f1e25503f7273d46e84ec302ef82c8f6a3724cdd2b27fe0fc3791a4f7e14dff
3
+ size 1002981980
en/train-00002-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13739bf930292fe1cbcb0a210372190d31ddf0fcb4c067d60851e6527379067e
3
+ size 1023054474
en/train-00003-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e831ac156e2fed5fded36049b6c0ad3ad9256e72acc63bd6c75c8b27fe99229
3
+ size 993117724
en/train-00004-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:adeea08b8f970b21f3c9af685641bb860cf8ccbd926a159731cfa47ee4f7beb5
3
+ size 974671170
en/train-00005-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e9cff9062fe6525f1d5421ebaa1bcb8753b1b2d3fa6e0d6e4940521aec6bf57
3
+ size 1045999013
en/train-00006-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4484796d3b9fbb8b8acfe0f63ed2d024c92cb71b0fb149f4696d4cc6ed447f2
3
+ size 969827909
en/train-00007-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50b610ede096ad6df71ddbeb1ffbac182783cab9c5e3b17625223e2e95d0ba76
3
+ size 1056815328
en/train-00008-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5e405c0f04bc738fe38c17e628b8ab4b0a8c16f10f85b5e0079492c0c507ca1
3
+ size 992015496
en/train-00009-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35d2e58d6a1cbbbcc0e45744d913517943bc8d24850cc293a15ae96d1d244146
3
+ size 995972083
en/train-00010-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c965a52a7a5ad1b5fc0fa4c4207d2471ba4fd5657b2305c6fcb849ba06345b2
3
+ size 1129828848
en/train-00011-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e603e6bb5f5a9d03906eefaa678427467aa475def7b7d6d0748e06756208bbc
3
+ size 1081374991
en/train-00012-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3362cc8560b0c0e14bf314a141495b89cfad4be28e6f5235233b8ae03111900f
3
+ size 1107912771
en/train-00013-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec7e79be2a0a46827e5d5123684f4eff98bc2aa7ecfc3674feacb1f31861c501
3
+ size 1108923137
en/train-00014-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2018e80bc819a4b2d9e21f7e3b2f3d39f43da86a55d958e14cead863ee7f2a32
3
+ size 1061394496
en/train-00015-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:465f4057324eb7d041f6d6d9c7f5323d63503dc567686d868d4d08ac3945b3e3
3
+ size 1082455053
en/train-00016-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b46df2d08806f9c67dbbab6c8e3c45695e542b0a0613e284288afc821e9d48a8
3
+ size 1093206326
en/train-00017-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6576f60fe34be4b3e65a326b9488c516249dd1ff785129d58a74912aca7af346
3
+ size 1081776030
en/train-00018-of-00019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:442299645c4ef6a4da0cddd84c54199517f5d7441574ff4ce2e5fd8114131ca6
3
+ size 893660633
es/train-00000-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f238f4b21d676c95f8e63cfacd2fe78fff9105ae0c23d147bb64b6dea6f6c61
3
+ size 935091211
es/train-00001-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1af3a26d2fdacb9a3dd67c8966f4e10297891d146a59c180c9bba9e77b6a4887
3
+ size 944786581
es/train-00002-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:902a96300d1acc1b36977351cffdc5c2219dc2f5ca07ed20843d807c24aff4c9
3
+ size 922054710
es/train-00003-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06930ef80cd0b42924a149ca7753e8b0250957ba1c4981a7e9262bdf512155ac
3
+ size 950367711
es/train-00004-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac28830038f73b60dba8d6d8c9dcb3c55a606e4ff0c35a3b65d19f1171a4af54
3
+ size 962250707
es/train-00005-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a3ab942eb79c29d38a0b5bfc77a5d7cadcd128d50a6ea6159ee0de13234c056
3
+ size 970315732
es/train-00006-of-00021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:300b2c110932b602bcdca8d99e1d487ccd2f259349c863c7451ebe8cb4408db5
3
+ size 921565924