meg-huggingface committed 3596f80 (1 parent: 95035be)

Trying to make it work with new EAI versions
src/backend/run_eval_suite_harness.py
CHANGED
@@ -46,6 +46,7 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
     logger.info(f"Selected Tasks: {task_names}")

     # no_cache=no_cache,
+    # output_base_path="logs"
     results = evaluator.simple_evaluate(
         model="hf-causal-experimental", # "hf-causal"
         model_args=eval_request.get_model_args(),
@@ -55,7 +56,6 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
         device=device,
         limit=limit,
         write_out=True,
-        output_base_path="logs"
     )

     results["config"]["model_dtype"] = eval_request.precision
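For context, the state of this block after the commit is sketched below. It is reconstructed from the diff context only: the arguments between model_args and device are not shown in the diff and stay elided here, and the reading that newer EleutherAI lm-evaluation-harness ("EAI") releases no longer accept the output_base_path keyword of evaluator.simple_evaluate is an assumption inferred from the commit message, not something the diff itself confirms.

    # no_cache=no_cache,
    # output_base_path="logs"  # assumed unsupported by newer EAI harness versions, hence commented out
    results = evaluator.simple_evaluate(
        model="hf-causal-experimental", # "hf-causal"
        model_args=eval_request.get_model_args(),
        # ... arguments not shown in the diff are unchanged and omitted here ...
        device=device,
        limit=limit,
        write_out=True,
    )

    results["config"]["model_dtype"] = eval_request.precision

If the per-task output files previously written under "logs" via output_base_path are still needed, the caller now has to persist the returned results dict itself (for example with a plain json.dump into a local directory); that replacement is hypothetical and is not part of this commit.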