Spaces: Runtime error
Ahmed Ahmed committed
Commit · 536d515 · 1 Parent(s): ce8066d
consolidate
Browse files
- app.py +96 -36
- src/evaluation/dynamic_eval.py +36 -7
- src/evaluation/perplexity_eval.py +67 -32
- src/leaderboard/read_evals.py +45 -18
- src/populate.py +79 -42
app.py
CHANGED
@@ -41,50 +41,100 @@ def init_leaderboard(dataframe):
     )
 
 def refresh_leaderboard():
+    import sys
+    import traceback
+
     try:
+        sys.stderr.write("Refreshing leaderboard data...\n")
+        sys.stderr.flush()
+
+        # Get fresh leaderboard data
+        df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
+        sys.stderr.write(f"Got DataFrame with shape: {df.shape}\n")
+        sys.stderr.write(f"DataFrame columns: {df.columns.tolist()}\n")
+        sys.stderr.flush()
+
+        # Check if DataFrame is valid for leaderboard
+        if df is None:
+            sys.stderr.write("DataFrame is None, cannot create leaderboard\n")
+            sys.stderr.flush()
+            raise ValueError("DataFrame is None")
+
+        if df.empty:
+            sys.stderr.write("DataFrame is empty, creating minimal valid DataFrame\n")
+            sys.stderr.flush()
+            # Create a minimal valid DataFrame that won't crash the leaderboard
+            import pandas as pd
+            empty_df = pd.DataFrame(columns=COLS)
+            # Add one dummy row to prevent leaderboard component from crashing
+            dummy_row = {col: 0 if col in BENCHMARK_COLS or col == AutoEvalColumn.average.name else "" for col in COLS}
+            dummy_row[AutoEvalColumn.model.name] = "No models evaluated yet"
+            dummy_row[AutoEvalColumn.model_type_symbol.name] = "?"
+            empty_df = pd.DataFrame([dummy_row])
+            return init_leaderboard(empty_df)
+
+        sys.stderr.write("Creating leaderboard with valid DataFrame\n")
+        sys.stderr.flush()
+        return init_leaderboard(df)
+
     except Exception as e:
-    return init_leaderboard(df)
+        error_msg = str(e)
+        traceback_str = traceback.format_exc()
+        sys.stderr.write(f"Error in refresh_leaderboard: {error_msg}\n")
+        sys.stderr.write(f"Traceback: {traceback_str}\n")
+        sys.stderr.flush()
+        raise
 
 def run_perplexity_test(model_name, revision, precision):
     """Run perplexity evaluation on demand."""
-    print(f"Revision: {revision}", flush=True)
-    print(f"Precision: {precision}", flush=True)
+    import sys
+    import traceback
 
     if not model_name:
         return "Please enter a model name.", None
 
+    try:
+        # Use stderr for more reliable logging in HF Spaces
+        sys.stderr.write(f"\n=== Running Perplexity Test ===\n")
+        sys.stderr.write(f"Model: {model_name}\n")
+        sys.stderr.write(f"Revision: {revision}\n")
+        sys.stderr.write(f"Precision: {precision}\n")
+        sys.stderr.flush()
+
+        success, result = run_dynamic_perplexity_eval(model_name, revision, precision)
+        sys.stderr.write(f"Evaluation result - Success: {success}, Result: {result}\n")
+        sys.stderr.flush()
+
+        if success:
+            try:
+                # Try to refresh leaderboard
+                sys.stderr.write("Attempting to refresh leaderboard...\n")
+                sys.stderr.flush()
+
+                new_leaderboard = refresh_leaderboard()
+                sys.stderr.write("Leaderboard refresh successful\n")
+                sys.stderr.flush()
+
+                return f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\nResults saved to leaderboard.", new_leaderboard
+            except Exception as refresh_error:
+                # If leaderboard refresh fails, still show success but don't update leaderboard
+                error_msg = str(refresh_error)
+                traceback_str = traceback.format_exc()
+                sys.stderr.write(f"Leaderboard refresh failed: {error_msg}\n")
+                sys.stderr.write(f"Traceback: {traceback_str}\n")
+                sys.stderr.flush()
+
+                return f"✅ Perplexity evaluation completed!\nPerplexity: {result:.4f}\n\n⚠️ Results saved but leaderboard refresh failed: {error_msg}\n\nPlease refresh the page to see updated results.", None
+        else:
+            return f"❌ Evaluation failed: {result}", None
+
+    except Exception as e:
+        error_msg = str(e)
+        traceback_str = traceback.format_exc()
+        sys.stderr.write(f"Critical error in run_perplexity_test: {error_msg}\n")
+        sys.stderr.write(f"Traceback: {traceback_str}\n")
+        sys.stderr.flush()
+        return f"❌ Critical error: {error_msg}", None
 
 # Initialize results repository and directory
 try:
@@ -131,20 +181,30 @@ with demo:
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
         with gr.TabItem("🧪 Test Model", elem_id="test-model-tab", id=2):
+            gr.Markdown("## Run Perplexity Test\n\nTest any Hugging Face model for perplexity evaluation.")
+
            with gr.Row():
                with gr.Column():
-                    model_name = gr.Textbox(label="Model name", placeholder="
+                    model_name = gr.Textbox(label="Model name", placeholder="openai-community/gpt2")
                    revision = gr.Textbox(label="Revision", placeholder="main", value="main")
                    precision = gr.Dropdown(
                        choices=["float16", "bfloat16"],
                        label="Precision",
                        value="float16"
                    )
+                    debug_mode = gr.Checkbox(label="Enable debug mode (more verbose logging)", value=True)
 
                with gr.Column():
                    test_button = gr.Button("🚀 Run Perplexity Test", variant="primary")
                    result = gr.Markdown()
 
+            gr.Markdown("""
+            ### Tips:
+            - Check stderr logs in HF Spaces for detailed debugging information
+            - If evaluation succeeds but leaderboard doesn't update, try refreshing the page
+            - Example models to test: `openai-community/gpt2`, `EleutherAI/gpt-neo-1.3B`
+            """)
+
            test_button.click(
                run_perplexity_test,
                [model_name, revision, precision],
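Not part of the commit: a minimal standalone sketch of the dummy-row fallback used in refresh_leaderboard above. The column names here are hypothetical stand-ins for whatever COLS, BENCHMARK_COLS, and the AutoEvalColumn fields actually resolve to in this Space.

import pandas as pd

# Hypothetical stand-ins for COLS / BENCHMARK_COLS / AutoEvalColumn values from this repo.
COLS = ["T", "Model", "Average", "Perplexity"]
BENCHMARK_COLS = ["Perplexity"]
AVERAGE_COL, MODEL_COL, TYPE_SYMBOL_COL = "Average", "Model", "T"

# Numeric columns get 0, everything else an empty string, then the two display fields are filled in.
dummy_row = {col: 0 if col in BENCHMARK_COLS or col == AVERAGE_COL else "" for col in COLS}
dummy_row[MODEL_COL] = "No models evaluated yet"
dummy_row[TYPE_SYMBOL_COL] = "?"

# A single-row frame gives the Leaderboard component something valid to render.
print(pd.DataFrame([dummy_row]))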
src/evaluation/dynamic_eval.py
CHANGED
@@ -1,5 +1,6 @@
 import json
 import os
+import sys
 from datetime import datetime
 from src.evaluation.perplexity_eval import evaluate_perplexity, create_perplexity_result
 from src.envs import EVAL_RESULTS_PATH, API, RESULTS_REPO
@@ -9,11 +10,20 @@ def run_dynamic_perplexity_eval(model_name, revision="main", precision="float16"
     Run perplexity evaluation and save results.
     """
     try:
+        sys.stderr.write(f"Starting dynamic evaluation for {model_name}\n")
+        sys.stderr.flush()
+
         # Run evaluation
+        sys.stderr.write("Running perplexity evaluation...\n")
+        sys.stderr.flush()
         perplexity_score = evaluate_perplexity(model_name, revision)
+        sys.stderr.write(f"Perplexity evaluation completed: {perplexity_score}\n")
+        sys.stderr.flush()
 
         # Create result structure
         result = create_perplexity_result(model_name, revision, precision, perplexity_score)
+        sys.stderr.write(f"Created result structure: {result}\n")
+        sys.stderr.flush()
 
         # Save result file
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -25,20 +35,39 @@ def run_dynamic_perplexity_eval(model_name, revision="main", precision="float16"
         os.makedirs(result_dir, exist_ok=True)
 
         result_path = os.path.join(result_dir, result_filename)
+        sys.stderr.write(f"Saving result to: {result_path}\n")
+        sys.stderr.flush()
 
         with open(result_path, "w") as f:
             json.dump(result, f, indent=2)
 
+        sys.stderr.write("Result file saved locally\n")
+        sys.stderr.flush()
+
         # Upload to Hugging Face dataset
+        try:
+            sys.stderr.write(f"Uploading to HF dataset: {RESULTS_REPO}\n")
+            sys.stderr.flush()
+
+            API.upload_file(
+                path_or_fileobj=result_path,
+                path_in_repo=result_path.split("eval-results/")[1],
+                repo_id=RESULTS_REPO,
+                repo_type="dataset",
+                commit_message=f"Add perplexity results for {model_name}",
+            )
+            sys.stderr.write("Upload completed successfully\n")
+            sys.stderr.flush()
+        except Exception as upload_error:
+            sys.stderr.write(f"Upload failed: {upload_error}\n")
+            sys.stderr.flush()
+            # Don't fail the whole process if upload fails
 
         return True, perplexity_score
 
     except Exception as e:
+        import traceback
+        sys.stderr.write(f"Error in run_dynamic_perplexity_eval: {e}\n")
+        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
+        sys.stderr.flush()
        return False, str(e)
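A quick aside, not from the commit: the path_in_repo argument above just strips everything up to and including "eval-results/" from the local path. A sketch with a made-up local path (the real result_dir/result_filename layout is built in code outside these hunks):

# Made-up local path; the actual layout comes from EVAL_RESULTS_PATH and the timestamped filename.
result_path = "eval-results/openai-community/gpt2/results_20240101_120000.json"

path_in_repo = result_path.split("eval-results/")[1]
print(path_in_repo)  # openai-community/gpt2/results_20240101_120000.json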
src/evaluation/perplexity_eval.py
CHANGED
@@ -1,4 +1,5 @@
 import torch
+import sys
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import numpy as np
 
@@ -15,38 +16,72 @@ def evaluate_perplexity(model_name, revision="main", test_text=None):
         float: Perplexity score (lower is better)
     """
 
+    try:
+        sys.stderr.write(f"Loading model: {model_name} (revision: {revision})\n")
+        sys.stderr.flush()
+
+        # Default test text if none provided
+        if test_text is None:
+            test_text = """Artificial intelligence has transformed the way we live and work, bringing both opportunities and challenges.
+From autonomous vehicles to language models that can engage in human-like conversation, AI technologies are becoming increasingly
+sophisticated. However, with this advancement comes the responsibility to ensure these systems are developed and deployed ethically,
+with careful consideration for privacy, fairness, and transparency. The future of AI will likely depend on how well we balance innovation
+with these important social considerations."""
+
+        sys.stderr.write("Loading tokenizer...\n")
+        sys.stderr.flush()
+        # Load tokenizer first
+        tokenizer = AutoTokenizer.from_pretrained(model_name, revision=revision)
+        sys.stderr.write("Tokenizer loaded successfully\n")
+        sys.stderr.flush()
+
+        sys.stderr.write("Loading model...\n")
+        sys.stderr.flush()
+        # Load model
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            revision=revision,
+            torch_dtype=torch.float16,
+            device_map="auto"
+        )
+        sys.stderr.write("Model loaded successfully\n")
+        sys.stderr.flush()
+
+        sys.stderr.write("Tokenizing input text...\n")
+        sys.stderr.flush()
+        # Tokenize the text
+        inputs = tokenizer(test_text, return_tensors="pt")
+        sys.stderr.write(f"Tokenized input shape: {inputs['input_ids'].shape}\n")
+        sys.stderr.flush()
+
+        # Move to same device as model
+        inputs = {k: v.to(model.device) for k, v in inputs.items()}
+        sys.stderr.write(f"Moved inputs to device: {model.device}\n")
+        sys.stderr.flush()
+
+        sys.stderr.write("Running forward pass...\n")
+        sys.stderr.flush()
+        # Calculate loss
+        with torch.no_grad():
+            outputs = model(**inputs, labels=inputs["input_ids"])
+            loss = outputs.loss
+
+        sys.stderr.write(f"Calculated loss: {loss.item()}\n")
+        sys.stderr.flush()
+
+        # Calculate perplexity
+        perplexity = torch.exp(loss).item()
+        sys.stderr.write(f"Final perplexity: {perplexity}\n")
+        sys.stderr.flush()
+
+        return perplexity
+
+    except Exception as e:
+        import traceback
+        sys.stderr.write(f"Error in evaluate_perplexity: {e}\n")
+        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
+        sys.stderr.flush()
+        raise
 
 def create_perplexity_result(model_name, revision, precision, perplexity_score):
     """
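For reference, the perplexity returned above is simply the exponential of the mean per-token cross-entropy that the model reports when scored on its own input as labels. A tiny self-contained illustration of that relationship (the loss value is made up):

import torch

# model(**inputs, labels=inputs["input_ids"]) returns the mean per-token cross-entropy as .loss;
# perplexity is exp(loss), so a lower loss means a lower (better) perplexity.
loss = torch.tensor(2.3)              # example loss value
perplexity = torch.exp(loss).item()
print(round(perplexity, 2))           # 9.97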
src/leaderboard/read_evals.py
CHANGED
@@ -76,8 +76,11 @@ class EvalResult:
 
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
+        import sys
+
+        sys.stderr.write(f"\nProcessing result for model: {self.full_model}\n")
+        sys.stderr.write(f"Raw results: {self.results}\n")
+        sys.stderr.flush()
 
         # Calculate average, handling perplexity (lower is better)
         scores = []
@@ -93,7 +96,8 @@
             scores.append(score)
 
         average = sum(scores) / len(scores) if scores else 0
+        sys.stderr.write(f"Calculated average score: {average}\n")
+        sys.stderr.flush()
 
         data_dict = {
             "eval_name": self.eval_name,  # not a column, just a save name,
@@ -115,17 +119,24 @@ class EvalResult:
         # Add perplexity score with the exact column name from Tasks
         if perplexity_score is not None:
             data_dict[Tasks.task0.value.col_name] = perplexity_score
+            sys.stderr.write(f"Added perplexity score {perplexity_score} under column {Tasks.task0.value.col_name}\n")
+            sys.stderr.flush()
         else:
             data_dict[Tasks.task0.value.col_name] = None
+            sys.stderr.write(f"No perplexity score found for column {Tasks.task0.value.col_name}\n")
+            sys.stderr.flush()
 
+        sys.stderr.write(f"Final data dict keys: {list(data_dict.keys())}\n")
+        sys.stderr.flush()
         return data_dict
 
 def get_raw_eval_results(results_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all perplexity results"""
+    import sys
+
+    sys.stderr.write(f"\nSearching for result files in: {results_path}\n")
+    sys.stderr.flush()
+
     model_result_filepaths = []
 
     for root, _, files in os.walk(results_path):
@@ -136,39 +147,55 @@ def get_raw_eval_results(results_path: str) -> list[EvalResult]:
         for file in files:
             model_result_filepaths.append(os.path.join(root, file))
 
+    sys.stderr.write(f"Found {len(model_result_filepaths)} result files\n")
+    sys.stderr.flush()
 
     eval_results = {}
     for model_result_filepath in model_result_filepaths:
         try:
+            sys.stderr.write(f"\nProcessing file: {model_result_filepath}\n")
+            sys.stderr.flush()
             # Creation of result
             eval_result = EvalResult.init_from_json_file(model_result_filepath)
+            sys.stderr.write(f"Created result object for: {eval_result.full_model}\n")
+            sys.stderr.flush()
 
             # Store results of same eval together
             eval_name = eval_result.eval_name
             if eval_name in eval_results.keys():
                 eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+                sys.stderr.write(f"Updated existing result for {eval_name}\n")
+                sys.stderr.flush()
             else:
                 eval_results[eval_name] = eval_result
+                sys.stderr.write(f"Added new result for {eval_name}\n")
+                sys.stderr.flush()
         except Exception as e:
+            sys.stderr.write(f"Error processing result file {model_result_filepath}: {e}\n")
+            import traceback
+            sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
+            sys.stderr.flush()
             continue
 
     results = []
+    sys.stderr.write(f"\nProcessing {len(eval_results)} evaluation results\n")
+    sys.stderr.flush()
+
     for v in eval_results.values():
         try:
+            sys.stderr.write(f"\nConverting result to dict for: {v.full_model}\n")
+            sys.stderr.flush()
             v.to_dict()  # we test if the dict version is complete
             results.append(v)
+            sys.stderr.write("Successfully converted and added result\n")
+            sys.stderr.flush()
         except KeyError as e:
+            sys.stderr.write(f"Error converting result to dict: {e}\n")
+            import traceback
+            sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
+            sys.stderr.flush()
            continue
 
+    sys.stderr.write(f"\nReturning {len(results)} processed results\n")
+    sys.stderr.flush()
    return results
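One detail worth spelling out, not part of the commit: the dict comprehension inside the update call above means that when several result files share an eval_name, later files can add scores but a None never overwrites an existing value. A small sketch with made-up score keys:

# Made-up score keys; the real ones come from the Tasks enum in this repo.
existing = {"perplexity": 12.5, "other_metric": None}
incoming = {"perplexity": None, "other_metric": 3.1}

existing.update({k: v for k, v in incoming.items() if v is not None})
print(existing)  # {'perplexity': 12.5, 'other_metric': 3.1}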
src/populate.py
CHANGED
@@ -1,60 +1,97 @@
 import pandas as pd
+import sys
 from src.display.formatting import has_no_nan_values, make_clickable_model
 from src.display.utils import AutoEvalColumn
 from src.leaderboard.read_evals import get_raw_eval_results
 
 def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     """Creates a dataframe from all the individual experiment results"""
+    try:
+        sys.stderr.write("\n=== Starting leaderboard creation ===\n")
+        sys.stderr.write(f"Looking for results in: {results_path}\n")
+        sys.stderr.write(f"Expected columns: {cols}\n")
+        sys.stderr.write(f"Benchmark columns: {benchmark_cols}\n")
+        sys.stderr.flush()
 
+        raw_data = get_raw_eval_results(results_path)
+        sys.stderr.write(f"\nFound {len(raw_data)} raw results\n")
+        sys.stderr.flush()
 
+        all_data_json = []
+        for i, v in enumerate(raw_data):
+            try:
+                data_dict = v.to_dict()
+                all_data_json.append(data_dict)
+                sys.stderr.write(f"Successfully processed result {i+1}/{len(raw_data)}: {v.full_model}\n")
+                sys.stderr.flush()
+            except Exception as e:
+                sys.stderr.write(f"Error processing result {i+1}/{len(raw_data)} ({v.full_model}): {e}\n")
+                sys.stderr.flush()
+                continue
+
+        sys.stderr.write(f"\nConverted to {len(all_data_json)} JSON records\n")
+        sys.stderr.flush()
+
+        if all_data_json:
+            sys.stderr.write("Sample record keys: " + str(list(all_data_json[0].keys())) + "\n")
+            sys.stderr.flush()
 
+        if not all_data_json:
+            sys.stderr.write("\nNo data found, creating empty DataFrame\n")
+            sys.stderr.flush()
+            empty_df = pd.DataFrame(columns=cols)
+            # Ensure correct column types
+            empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
+            for col in benchmark_cols:
+                empty_df[col] = pd.Series(dtype=float)
+            return empty_df
 
+        df = pd.DataFrame.from_records(all_data_json)
+        sys.stderr.write("\nCreated DataFrame with columns: " + str(df.columns.tolist()) + "\n")
+        sys.stderr.write("DataFrame shape: " + str(df.shape) + "\n")
+        sys.stderr.flush()
 
+        try:
+            df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+            sys.stderr.write("\nSorted DataFrame by average\n")
+            sys.stderr.flush()
+        except KeyError as e:
+            sys.stderr.write(f"\nError sorting DataFrame: {e}\n")
+            sys.stderr.write("Available columns: " + str(df.columns.tolist()) + "\n")
+            sys.stderr.flush()
 
+        try:
+            df = df[cols].round(decimals=2)
+            sys.stderr.write("\nSelected and rounded columns\n")
+            sys.stderr.flush()
+        except KeyError as e:
+            sys.stderr.write(f"\nError selecting columns: {e}\n")
+            sys.stderr.write("Requested columns: " + str(cols) + "\n")
+            sys.stderr.write("Available columns: " + str(df.columns.tolist()) + "\n")
+            sys.stderr.flush()
+            # Create empty DataFrame with correct structure
+            empty_df = pd.DataFrame(columns=cols)
+            empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
+            for col in benchmark_cols:
+                empty_df[col] = pd.Series(dtype=float)
+            return empty_df
+
+        # filter out if perplexity hasn't been evaluated
+        df = df[has_no_nan_values(df, benchmark_cols)]
+        sys.stderr.write("\nFinal DataFrame shape after filtering: " + str(df.shape) + "\n")
+        sys.stderr.write("Final columns: " + str(df.columns.tolist()) + "\n")
+        sys.stderr.flush()
+
+        return df
+
+    except Exception as e:
+        sys.stderr.write(f"\nCritical error in get_leaderboard_df: {e}\n")
+        import traceback
+        sys.stderr.write(f"Traceback: {traceback.format_exc()}\n")
+        sys.stderr.flush()
+        # Return empty DataFrame as fallback
         empty_df = pd.DataFrame(columns=cols)
         empty_df[AutoEvalColumn.average.name] = pd.Series(dtype=float)
         for col in benchmark_cols:
             empty_df[col] = pd.Series(dtype=float)
         return empty_df
-    # filter out if perplexity hasn't been evaluated
-    df = df[has_no_nan_values(df, benchmark_cols)]
-    print("\nFinal DataFrame shape after filtering:", df.shape, flush=True)
-    print("Final columns:", df.columns.tolist(), flush=True)
-
-    return df
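As a closing note (not from the commit), the DataFrame pipeline above boils down to: build from records, sort by the average column, select and round the display columns, then drop rows with NaN benchmark values. A toy version of that flow with made-up column names and an inline stand-in for has_no_nan_values:

import pandas as pd

# Made-up records and columns; the real ones come from EvalResult.to_dict() and AutoEvalColumn.
records = [
    {"Model": "model-a", "Average": 9.97, "Perplexity": 9.97},
    {"Model": "model-b", "Average": None, "Perplexity": None},
]
cols = ["Model", "Average", "Perplexity"]
benchmark_cols = ["Perplexity"]

df = pd.DataFrame.from_records(records)
df = df.sort_values(by=["Average"], ascending=False)   # NaN averages sort last
df = df[cols].round(decimals=2)
df = df[df[benchmark_cols].notna().all(axis=1)]        # stand-in for has_no_nan_values
print(df)  # only model-a survives the NaN filter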