add document ordering snippets (#10)
README.md CHANGED

@@ -156,6 +156,48 @@ END QUESTION

Reported token counts per question are based on the completed prompt, using the `cl100k_base` tokenizer from `tiktoken`.
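
As a rough illustration, such a count can be reproduced with `tiktoken` along the following lines (a minimal sketch, not part of the repository code; `completed_prompt` is a placeholder for a fully rendered prompt):

```
import tiktoken

# The cl100k_base tokenizer referenced above.
encoding = tiktoken.get_encoding("cl100k_base")

# Placeholder: in practice this would be the fully completed prompt
# (all documents plus the question) for a single benchmark item.
completed_prompt = "..."

print(len(encoding.encode(completed_prompt)))
```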

The order in which documents are loaded matters: they should be added to the prompt template in the order of the filenames in `data_source_filenames`. Below are code snippets showing how we read the questions and the extracted text files from disk.

```
import csv
import os

from huggingface_hub import hf_hub_download


# Note: in the benchmark harness these are methods on a class (hence `self`);
# the imports above are added here so the snippet is self-contained.
def load_questions(self) -> list[dict]:
    """Load LCR questions from HuggingFace dataset"""
    csv_path = hf_hub_download(
        repo_id="ArtificialAnalysis/AA-LCR",
        filename="AA-LCR_Dataset.csv",
        repo_type="dataset",
    )

    questions = []
    with open(csv_path, encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            # Parse data_source_filenames as an ordered list
            if "data_source_filenames" in row and isinstance(row["data_source_filenames"], str):
                row["data_source_filenames"] = row["data_source_filenames"].split(";")

            # Parse answer as a list (semicolon-separated criteria)
            if "answer" in row and isinstance(row["answer"], str):
                row["answer"] = row["answer"].split(";")
            questions.append(row)

    return questions


def get_document_set(
    self, dataset_folder: str, document_category: str, document_set_id: str, data_source_filenames: list[str]
) -> list[str]:
    """Get document set for a question in the order specified by data_source_filenames"""

    # Documents are extracted to lcr/lcr/{category}/{set_id}/ from the HuggingFace zip
    document_set_path = os.path.join(dataset_folder, document_category, document_set_id)

    document_texts = []
    for filename in data_source_filenames:
        document_path = os.path.join(document_set_path, filename)
        with open(document_path, encoding="utf-8") as f:
            document_texts.append(f.read())
    return document_texts
```
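
To make the ordering concrete, here is a hypothetical usage sketch. It assumes `harness` is an instance of the class exposing the two methods above, and that each CSV row carries `document_category`, `document_set_id`, and `question` columns matching the parameters used here (those column names are an assumption, not confirmed by this diff):

```
# Hypothetical sketch: build one prompt with documents in CSV order.
harness = LCRHarness()  # assumed name for the class exposing the methods above

questions = harness.load_questions()
q = questions[0]

docs = harness.get_document_set(
    dataset_folder="lcr/lcr",  # folder the HuggingFace zip was extracted to
    document_category=q["document_category"],  # assumed CSV column
    document_set_id=q["document_set_id"],      # assumed CSV column
    data_source_filenames=q["data_source_filenames"],
)

# Concatenate in the order given by data_source_filenames, then append the question.
prompt = "\n\n".join(docs) + "\n\n" + q["question"]
```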

## Scoring Approach

We use an LLM-based equality checker to evaluate responses:
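
For illustration only, a minimal sketch of what such a check could look like (this is an assumption, not the benchmark's actual checker prompt, judge model, or client code):

```
# Hypothetical sketch of an LLM-based equality check, using the OpenAI client.
from openai import OpenAI

client = OpenAI()


def answers_match(candidate: str, reference: str) -> bool:
    """Ask a judge model whether a candidate answer satisfies a reference criterion."""
    prompt = (
        "Do the following two answers express the same fact? Reply YES or NO.\n\n"
        f"Reference: {reference}\n"
        f"Candidate: {candidate}"
    )
    response = client.chat.completions.create(
        model="gpt-4o",  # placeholder judge model, not necessarily what is used here
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content.strip().upper().startswith("YES")
```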