First version of BLAB

Files changed:
- README.md +251 -0
- blab_long_audio.py +352 -0

README.md
CHANGED
@@ -1,3 +1,254 @@
---
license: cc-by-4.0
multilinguality:
- monolingual
task_categories:
- audio-text-to-text
size_categories:
- 1K<n<10K
source_datasets:
- original
pretty_name: BLAB (Brutally Long Audio Bench)
tags:
- speech
- audio
- speech-llm
- audio-lm
- long-audio
- spoken-language-understanding
viewer: true
configs:
- config_name: word_localization
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: question
    dtype: string
  - name: answer_type
    dtype: string
  - name: groundtruth
    dtype: LargeList
    inner_dtype:
    - name: word
      dtype: string
    - name: start
      dtype: float32
    - name: end
      dtype: float32
- config_name: advertisement_localization
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: question
    dtype: string
  - name: answer_type
    dtype: string
  - name: groundtruth
    dtype: Struct
    fields:
    - name: ads_segment
      dtype: LargeList
      inner_dtype:
      - name: text
        dtype: string
      - name: start
        dtype: float32
      - name: end
        dtype: float32
    - name: word_timestamp
      dtype: LargeList
      inner_dtype:
      - name: word
        dtype: string
      - name: start
        dtype: float32
      - name: end
        dtype: float32
- config_name: named_entity_localization
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: question
    dtype: string
  - name: answer_type
    dtype: string
  - name: groundtruth
    dtype: Struct
    fields:
    - name: entities
      dtype: LargeList
      inner_dtype:
      - name: entity_type
        dtype: string
      - name: entity
        dtype: string
      - name: start
        dtype: float32
      - name: end
        dtype: float32
    - name: word_timestamp
      dtype: LargeList
      inner_dtype:
      - name: word
        dtype: string
      - name: start
        dtype: float32
      - name: end
        dtype: float32
- config_name: speaker_number_estimation
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: question
    dtype: string
  - name: groundtruth
    dtype: Sequence # it's a list, even if it might contain one element
    inner_dtype:
      dtype: int32 # the type of elements within the list
- config_name: entire_duration
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: question
    dtype: string
  - name: groundtruth
    dtype: float32
- config_name: event_duration
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: question
    dtype: string
  - name: answer_type
    dtype: string
  - name: groundtruth
    dtype: float32
- config_name: emotion_ranking
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: type
    dtype: string
  - name: question
    dtype: string
  - name: correct_option
    dtype: string
  - name: option_A
    dtype: string
  - name: option_B
    dtype: string
  - name: option_C
    dtype: string
  - name: option_D
    dtype: string
  - name: option_E
    dtype: string
  - name: correct_answer
    dtype: string
- config_name: emotion_reasoning
  features:
  - name: video_url
    dtype: string
  - name: audio
    dtype: string
  - name: type
    dtype: string
  - name: question
    dtype: string
  - name: correct_option
    dtype: string
  - name: option_A
    dtype: string
  - name: option_B
    dtype: string
  - name: option_C
    dtype: string
  - name: option_D
    dtype: string
  - name: correct_answer
    dtype: string
---

# BLAB: Brutally Long Audio Bench

## Dataset Summary
Brutally Long Audio Bench (BLAB) is a challenging long-form audio benchmark that evaluates audio LMs on localization, duration estimation, emotion, and counting tasks, using audio segments averaging 51 minutes in length. BLAB consists of 833+ hours of diverse, full-length YouTube audio clips, each paired with human-annotated, text-based natural language questions and answers. The audio data were collected from permissively licensed sources and underwent a human-assisted filtering process to ensure task compliance.

NB: This data should only be used for evaluation, not for model training.

## Tasks Covered in BLAB

### Localization
* **Word Localization:** Locate the exact start and end times of specific words within the audio (an illustrative record follows this list).
* **Named Entity Localization:** Detect and locate the exact start and end times of named entities (e.g., people, organizations, locations).
* **Advertisement Localization:** Locate and transcribe advertisement segments within a podcast.
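
To make the localization targets concrete, here is a hypothetical `word_localization` record. The field names follow the schema above; the URL, question, and timestamps are invented for illustration:

```python
# Hypothetical word_localization record (illustrative values only).
example = {
    "video_url": "https://www.youtube.com/watch?v=...",
    "audio": "path/or/url/to/the/downloaded/audio",
    "question": "When is the word 'benchmark' spoken?",
    "groundtruth": [
        {"word": "benchmark", "start": 734.12, "end": 734.58},
        {"word": "benchmark", "start": 2010.40, "end": 2010.93},
    ],
}
```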

### Counting
* **Speaker Number Estimation:** Determine the number of unique speakers present in the full audio segment.

### Duration
* **Event Duration:** Calculate the duration of specific acoustic events (e.g., laughter in a comedy special, question-and-answer segments in a panel session, or a particular speaker's total speaking time in a meeting) within an audio sample.
* **Entire Duration:** Estimate the total duration of an audio file, expressed in seconds.

### Emotion
* **Emotion Reasoning:** Reason over emotional expressions conveyed in the audio. Both emotion tasks are multiple choice; an illustrative record follows this list.
* **Emotion Ranking:** Rank different emotional expressions of speech and non-verbal sounds present in the audio.
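
A hypothetical `emotion_ranking` record, using the field names from the schema above with invented values:

```python
# Hypothetical emotion_ranking record (illustrative values only).
example = {
    "video_url": "https://www.youtube.com/watch?v=...",
    "audio": "path/or/url/to/the/downloaded/audio",
    "type": "ranking",
    "question": "Rank the emotions below from most to least prominent in the audio.",
    "option_A": "joy > surprise > anger",
    "option_B": "anger > joy > surprise",
    "option_C": "surprise > joy > anger",
    "option_D": "joy > anger > surprise",
    "option_E": "anger > surprise > joy",
    "correct_option": "C",
    "correct_answer": "surprise > joy > anger",
}
```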

## Dataset Structure

To load a specific task from BLAB, specify its configuration name. Keep in mind that **BLAB provides URLs to the YouTube audio files, not the actual audio files themselves.** You'll need to download the audio from these URLs separately; a download sketch follows the loading example below.

```python
from datasets import load_dataset

# Load the Word Localization task
word_localization_data = load_dataset("oreva/blab_long_audio", "word_localization")

# Load the Named Entity Localization task
named_entity_localization_data = load_dataset("oreva/blab_long_audio", "named_entity_localization")

# You can load any other task similarly:
# speaker_number_estimation_data = load_dataset("oreva/blab_long_audio", "speaker_number_estimation")
# entire_duration_data = load_dataset("oreva/blab_long_audio", "entire_duration")
# event_duration_data = load_dataset("oreva/blab_long_audio", "event_duration")
# emotion_reasoning_data = load_dataset("oreva/blab_long_audio", "emotion_reasoning")
# emotion_ranking_data = load_dataset("oreva/blab_long_audio", "emotion_ranking")
```
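
Since the dataset ships URLs rather than waveforms, downloading is a separate step. Below is a minimal sketch using the `yt-dlp` package (an assumption, not a tool prescribed by BLAB) to fetch the audio track for each record; the output directory and codec are illustrative choices, and the availability of individual videos is not guaranteed:

```python
# Minimal download sketch; assumes `pip install yt-dlp` and ffmpeg on PATH.
import yt_dlp

def download_audio(video_url: str, out_dir: str = "blab_audio") -> None:
    ydl_opts = {
        "format": "bestaudio/best",
        "outtmpl": f"{out_dir}/%(id)s.%(ext)s",
        # Re-encode the downloaded stream to WAV via ffmpeg.
        "postprocessors": [{"key": "FFmpegExtractAudio", "preferredcodec": "wav"}],
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        ydl.download([video_url])

# Example: fetch the audio for every record in one task split.
# for record in word_localization_data["word_localization"]:
#     download_audio(record["video_url"])
```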

## Citation

```
@misc{ahia2025blabbrutallylongaudio,
  title={BLAB: Brutally Long Audio Bench},
  author={Orevaoghene Ahia and Martijn Bartelds and Kabir Ahuja and Hila Gonen and Valentin Hofmann and Siddhant Arora and Shuyue Stella Li and Vishal Puttagunta and Mofetoluwa Adeyemi and Charishma Buchireddy and Ben Walls and Noah Bennett and Shinji Watanabe and Noah A. Smith and Yulia Tsvetkov and Sachin Kumar},
  year={2025},
  eprint={2505.03054},
  archivePrefix={arXiv},
  primaryClass={cs.AI},
  url={https://arxiv.org/abs/2505.03054},
}
```
blab_long_audio.py
ADDED
@@ -0,0 +1,352 @@
import json

import datasets
from datasets import (
    BuilderConfig,
    DatasetInfo,
    Features,
    LargeList,
    Sequence,
    SplitGenerator,
    Value,
)

TASKS = [
    "word_localization",
    "advertisement_localization",
    "named_entity_localization",
    "speaker_number_estimation",
    "entire_duration",
    "event_duration",
    "emotion_ranking",
    "emotion_reasoning",
]

_DOCUMENT_DATASET_VERSION = "1.0.0"


# --- Main Dataset Builder Class ---
class BLAB(datasets.GeneratorBasedBuilder):
    """A dataset builder supporting various audio QA tasks,
    each with its own specific data schema.
    """

    BUILDER_CONFIGS = [
        BuilderConfig(
            name=task,
            version=datasets.Version(_DOCUMENT_DATASET_VERSION),
            description=f"BLAB dataset for task: {task}",
        )
        for task in TASKS
    ]

    def _info(self):
        """Defines the dataset schema (features) based on the selected task configuration."""
        # --- Schema definitions for each individual task ---
        if self.config.name == "word_localization":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": LargeList(
                        feature=Features({
                            "word": Value("string"),
                            "start": Value("float32"),
                            "end": Value("float32"),
                        })
                    ),
                }),
                description="Schema for the Word Localization task: segmenting and labeling words.",
                license="cc-by-4.0",  # kept consistent with the dataset card license
            )

        elif self.config.name == "advertisement_localization":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Features({
                        "ads_segment": LargeList(
                            feature=Features({
                                "text": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                        "word_timestamp": LargeList(
                            feature=Features({
                                "word": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                    }),
                }),
                description="Schema for the Advertisement Localization task: identifying ad segments and their transcripts.",
                # ... (other metadata)
            )

        elif self.config.name == "named_entity_localization":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Features({
                        "entities": LargeList(
                            feature=Features({
                                "entity_type": Value("string"),
                                "entity": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                        "word_timestamp": LargeList(
                            feature=Features({
                                "word": Value("string"),
                                "start": Value("float32"),
                                "end": Value("float32"),
                            }),
                        ),
                    }),
                }),
                description="Schema for the Named Entity Localization task: identifying specific entities and their timestamps.",
                # ... (other metadata)
            )

        elif self.config.name == "speaker_number_estimation":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Sequence(Value("int32")),
                }),
                description="Schema for the Speaker Number Estimation task: counting speakers in a segment.",
                # ... (other metadata)
            )

        elif self.config.name == "entire_duration":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Value("float32"),
                }),
                description="Schema for the Entire Duration task: determining the total duration of an audio file.",
            )

        elif self.config.name == "event_duration":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "groundtruth": Value("float32"),
                    "answer_type": Value("string"),
                }),
                description="Schema for the Event Duration task: identifying and timing specific events.",
                # ... (other metadata)
            )

        elif self.config.name == "emotion_ranking":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "type": Value("string"),
                    "correct_option": Value("string"),
                    "option_A": Value("string"),
                    "option_B": Value("string"),
                    "option_C": Value("string"),
                    "option_D": Value("string"),
                    "option_E": Value("string"),
                    "correct_answer": Value("string"),  # stores the correct answer string
                }),
                description="Schema for the Emotion Ranking task: selecting the best emotion option.",
                # ... (other metadata)
            )

        elif self.config.name == "emotion_reasoning":
            return DatasetInfo(
                features=Features({
                    "video_url": Value("string"),
                    "audio": Value("string"),
                    "question": Value("string"),
                    "type": Value("string"),
                    "correct_option": Value("string"),
                    "option_A": Value("string"),
                    "option_B": Value("string"),
                    "option_C": Value("string"),
                    "option_D": Value("string"),
                    "correct_answer": Value("string"),  # stores the correct answer string
                }),
                description="Schema for the Emotion Reasoning task: explaining emotional context.",
                # ... (other metadata)
            )

        else:
            raise ValueError(f"Unknown config name: {self.config.name}")

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators based on the selected task configuration."""
        if self.config.name not in TASKS:
            raise ValueError(f"Unknown config name: {self.config.name}")

        # Each task has a single JSON file named after its config.
        data_files = {self.config.name: f"blab_long_audio/{self.config.name}.json"}
        resolved_data_files = dl_manager.download_and_extract(data_files)

        return [
            SplitGenerator(name=split_name, gen_kwargs={"filepath": filepath})
            for split_name, filepath in resolved_data_files.items()
        ]

    def _generate_examples(self, filepath):
        """Yields examples from the dataset files, parsing data based on the active config."""

        def _project(items, keys):
            """Keeps only dict entries and projects each one onto the expected keys."""
            return [
                {key: item.get(key) for key in keys}
                for item in items
                if isinstance(item, dict)
            ]

        with open(filepath, "r", encoding="utf-8") as f:
            all_data = json.load(f)  # each .json file holds a single array of records

        for id_, data in enumerate(all_data):
            try:
                # Common fields for all tasks (missing fields become None).
                example = {
                    "video_url": data.get("video_url"),
                    "audio": data.get("audio"),
                    "question": data.get("question"),
                }

                # --- Task-specific groundtruth and other fields ---
                if self.config.name == "word_localization":
                    example["groundtruth"] = _project(
                        data.get("groundtruth", []), ("word", "start", "end")
                    )

                elif self.config.name == "advertisement_localization":
                    raw_groundtruth = data.get("groundtruth", {})
                    example["groundtruth"] = {
                        "ads_segment": _project(
                            raw_groundtruth.get("ads_segment", []),
                            ("text", "start", "end"),
                        ),
                        "word_timestamp": _project(
                            raw_groundtruth.get("word_timestamp", []),
                            ("word", "start", "end"),
                        ),
                    }

                elif self.config.name == "named_entity_localization":
                    raw_groundtruth = data.get("groundtruth", {})
                    example["groundtruth"] = {
                        "entities": _project(
                            raw_groundtruth.get("entities", []),
                            ("entity_type", "entity", "start", "end"),
                        ),
                        "word_timestamp": _project(
                            raw_groundtruth.get("word_timestamp", []),
                            ("word", "start", "end"),
                        ),
                    }

                elif self.config.name == "speaker_number_estimation":
                    raw_groundtruth = data.get("groundtruth")
                    if isinstance(raw_groundtruth, list):
                        example["groundtruth"] = [
                            int(x) for x in raw_groundtruth if isinstance(x, (int, float))
                        ]
                    elif isinstance(raw_groundtruth, (int, float)):
                        example["groundtruth"] = [int(raw_groundtruth)]
                    else:
                        example["groundtruth"] = []

                elif self.config.name == "entire_duration":
                    example["groundtruth"] = data.get("groundtruth")  # total duration in seconds

                elif self.config.name == "event_duration":
                    example["groundtruth"] = data.get("groundtruth")
                    example["answer_type"] = data.get("answer_type")

                elif self.config.name == "emotion_ranking":
                    for key in ("type", "correct_option", "option_A", "option_B",
                                "option_C", "option_D", "option_E", "correct_answer"):
                        example[key] = data.get(key)

                elif self.config.name == "emotion_reasoning":
                    for key in ("type", "correct_option", "option_A", "option_B",
                                "option_C", "option_D", "correct_answer"):
                        example[key] = data.get(key)

                else:
                    raise ValueError(
                        f"Unknown config name: {self.config.name}. This should not "
                        "happen if BUILDER_CONFIGS and _info are consistent."
                    )

                yield id_, example

            except Exception as e:
                # Skip malformed records rather than aborting the whole split.
                print(f"Error processing example {id_} in {filepath} for config {self.config.name}: {e}")