stmasson committed on
Commit 58a4be8 · verified · 1 parent: 963fb0d

Upload scripts/eval_humaneval_hf.py with huggingface_hub
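
The commit message says the file was pushed with the huggingface_hub client rather than through the web UI. Below is a minimal sketch of what that upload step might look like; the repo id and local path are assumptions (the repo id simply mirrors the OUTPUT_REPO used inside the script) and are not details recorded in this commit.

from huggingface_hub import HfApi

# Hypothetical upload step implied by the commit message; repo_id and paths are assumptions.
api = HfApi()  # expects an authenticated token, e.g. via the HF_TOKEN environment variable
api.upload_file(
    path_or_fileobj="scripts/eval_humaneval_hf.py",    # local script to push (assumed path)
    path_in_repo="scripts/eval_humaneval_hf.py",       # destination path inside the repo
    repo_id="stmasson/alizee-coder-devstral-1-small",  # assumed target repo
    repo_type="model",
    commit_message="Upload scripts/eval_humaneval_hf.py with huggingface_hub",
)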

Files changed (1)
  1. scripts/eval_humaneval_hf.py +353 -0
scripts/eval_humaneval_hf.py ADDED
@@ -0,0 +1,353 @@
+ # /// script
+ # dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "evalplus", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
+ # ///
+
+ """
+ HumanEval Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
+ Runs on HF Jobs with GPU support
+ """
+
+ import os
+ import re
+ import json
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import PeftModel
+ from datasets import load_dataset
+ from tqdm import tqdm
+ from huggingface_hub import HfApi
+
+ print("=" * 60)
+ print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
+ print("Benchmark: HumanEval (via EvalPlus)")
+ print("=" * 60)
+
+ # Configuration
+ BASE_MODEL = "mistralai/Devstral-Small-2505"
+ FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
+ OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
+ NUM_SAMPLES_PER_PROBLEM = 1
+ TEMPERATURE = 0.1
+ MAX_NEW_TOKENS = 512
+
+ # Check GPU
+ print(f"\nGPU available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+     print(f"GPU: {torch.cuda.get_device_name(0)}")
+     print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
+
+ # 4-bit quantization config
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_use_double_quant=True,
+ )
+
+ def load_humaneval():
+     """Load HumanEval dataset from EvalPlus"""
+     print("\nLoading HumanEval dataset...")
+     dataset = load_dataset("evalplus/humanevalplus", split="test")
+     print(f"Loaded {len(dataset)} problems")
+     return dataset
+
+ def load_model(model_name, adapter_name=None):
+     """Load model with optional LoRA adapter"""
+     print(f"\nLoading model: {model_name}")
+     if adapter_name:
+         print(f"With adapter: {adapter_name}")
+
+     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+     if tokenizer.pad_token is None:
+         tokenizer.pad_token = tokenizer.eos_token
+
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         quantization_config=bnb_config,
+         device_map="auto",
+         trust_remote_code=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     if adapter_name:
+         print("Loading LoRA adapter...")
+         model = PeftModel.from_pretrained(model, adapter_name)
+         # Merge for faster inference
+         model = model.merge_and_unload()
+         print("Adapter merged")
+
+     model.eval()
+     return model, tokenizer
+
+ def extract_python_code(text):
+     """Extract Python code from model output"""
+     # Try ```python blocks
+     pattern = r'```python\s*(.*?)\s*```'
+     matches = re.findall(pattern, text, re.DOTALL)
+     if matches:
+         return matches[-1].strip()
+
+     # Try ``` blocks
+     pattern = r'```\s*(.*?)\s*```'
+     matches = re.findall(pattern, text, re.DOTALL)
+     if matches:
+         return matches[-1].strip()
+
+     # Return as-is
+     return text.strip()
+
+ def generate_completion_base(model, tokenizer, prompt):
+     """Generate code completion for BASE model (direct completion)"""
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS,
+             temperature=TEMPERATURE,
+             do_sample=True if TEMPERATURE > 0 else False,
+             pad_token_id=tokenizer.pad_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+
+     # Stop at function boundary
+     stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
+     for stop in stop_tokens:
+         if stop in completion:
+             completion = completion[:completion.index(stop)]
+
+     return completion
+
+ def generate_completion_finetuned(model, tokenizer, prompt, problem_text):
+     """Generate code completion for FINE-TUNED model (Instruct format)"""
+     instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{problem_text}\n\nComplete the following function:\n{prompt}\n[/INST]"
+
+     inputs = tokenizer(instruct_prompt, return_tensors="pt").to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS * 2,  # More tokens for reasoning
+             temperature=TEMPERATURE,
+             do_sample=True if TEMPERATURE > 0 else False,
+             pad_token_id=tokenizer.pad_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+     code = extract_python_code(full_response)
+
+     # Extract just the function body if we got the full function
+     if "def " in code:
+         lines = code.split('\n')
+         result_lines = []
+         in_function = False
+         for line in lines:
+             if line.strip().startswith("def "):
+                 in_function = True
+                 continue
+             if in_function:
+                 result_lines.append(line)
+         if result_lines:
+             return '\n'.join(result_lines)
+
+     return code
+
+ def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
+     """Evaluate model on HumanEval and return samples"""
+     print(f"\nEvaluating {model_name}...")
+     samples = []
+
+     for i, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
+         task_id = problem["task_id"]
+         prompt = problem["prompt"]
+
+         for _ in range(NUM_SAMPLES_PER_PROBLEM):
+             try:
+                 if is_finetuned:
+                     completion = generate_completion_finetuned(model, tokenizer, prompt, prompt)
+                 else:
+                     completion = generate_completion_base(model, tokenizer, prompt)
+
+                 samples.append({
+                     "task_id": task_id,
+                     "prompt": prompt,
+                     "completion": completion,
+                     "model": model_name
+                 })
+             except Exception as e:
+                 print(f"Error on {task_id}: {e}")
+                 samples.append({
+                     "task_id": task_id,
+                     "prompt": prompt,
+                     "completion": "# Error during generation",
+                     "model": model_name
+                 })
+
+     return samples
+
+ def simple_syntax_check(code):
+     """Basic syntax validation"""
+     try:
+         compile(code, '<string>', 'exec')
+         return True
+     except SyntaxError:
+         return False
+
+ def evaluate_samples(samples, dataset):
+     """Lightweight check: syntax validation + entry-point presence (does not run the HumanEval unit tests)"""
+     results = {"passed": 0, "failed": 0, "error": 0}
+     detailed = []
+
+     for sample in samples:
+         task_id = sample["task_id"]
+         completion = sample["completion"]
+
+         # Find the problem
+         problem = None
+         for p in dataset:
+             if p["task_id"] == task_id:
+                 problem = p
+                 break
+
+         if problem is None:
+             results["error"] += 1
+             continue
+
+         # Combine prompt + completion
+         full_code = problem["prompt"] + completion
+
+         # Syntax check
+         if not simple_syntax_check(full_code):
+             results["failed"] += 1
+             detailed.append({"task_id": task_id, "status": "syntax_error"})
+             continue
+
+         # Execute the candidate code and confirm the entry point is defined
+         try:
+             # Isolated namespace for exec
+             exec_globals = {}
+             exec(full_code, exec_globals)
+
+             # Get entry point
+             entry_point = problem.get("entry_point", task_id.split("/")[-1])
+
+             # Check if function exists
+             if entry_point in exec_globals:
+                 results["passed"] += 1
+                 detailed.append({"task_id": task_id, "status": "passed"})
+             else:
+                 results["failed"] += 1
+                 detailed.append({"task_id": task_id, "status": "missing_function"})
+         except Exception as e:
+             results["error"] += 1
+             detailed.append({"task_id": task_id, "status": "runtime_error", "error": str(e)[:100]})
+
+     total = len(samples)
+     pass_rate = results["passed"] / total if total > 0 else 0
+
+     return {
+         "pass@1": pass_rate,
+         "passed": results["passed"],
+         "failed": results["failed"],
+         "error": results["error"],
+         "total": total,
+         "detailed": detailed[:10]  # First 10 for inspection
+     }
+
+ def main():
+     # Load dataset
+     dataset = load_humaneval()
+
+     results = {}
+     all_samples = {}
+
+     # Evaluate base model
+     print("\n" + "=" * 60)
+     print("EVALUATING BASE MODEL")
+     print("=" * 60)
+     base_model, base_tokenizer = load_model(BASE_MODEL)
+     base_samples = evaluate_model(base_model, base_tokenizer, dataset, "Devstral-Small-Base", is_finetuned=False)
+     results["base"] = evaluate_samples(base_samples, dataset)
+     all_samples["base"] = base_samples
+     print(f"\nBase Model Results: pass@1 = {results['base']['pass@1']*100:.2f}%")
+
+     # Free memory
+     del base_model
+     torch.cuda.empty_cache()
+
+     # Evaluate fine-tuned model
+     print("\n" + "=" * 60)
+     print("EVALUATING FINE-TUNED MODEL")
+     print("=" * 60)
+     ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
+     ft_samples = evaluate_model(ft_model, ft_tokenizer, dataset, "Alizee-Coder-Devstral", is_finetuned=True)
+     results["finetuned"] = evaluate_samples(ft_samples, dataset)
+     all_samples["finetuned"] = ft_samples
+     print(f"\nFine-tuned Model Results: pass@1 = {results['finetuned']['pass@1']*100:.2f}%")
+
+     # Summary
+     print("\n" + "=" * 60)
+     print("COMPARISON SUMMARY")
+     print("=" * 60)
+     print(f"\n{'Model':<40} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
+     print("-" * 70)
+     print(f"{'Devstral-Small-2505 (Base)':<40} {results['base']['pass@1']*100:>9.2f}% {results['base']['passed']:>8} {results['base']['failed']:>8}")
+     print(f"{'Alizee-Coder-Devstral (Fine-tuned)':<40} {results['finetuned']['pass@1']*100:>9.2f}% {results['finetuned']['passed']:>8} {results['finetuned']['failed']:>8}")
+
+     improvement = (results['finetuned']['pass@1'] - results['base']['pass@1']) * 100
+     sign = "+" if improvement >= 0 else ""
+     print(f"\n{'Improvement:':<40} {sign}{improvement:>9.2f}%")
+
+     # Save results
+     output = {
+         "benchmark": "HumanEval",
+         "base_model": BASE_MODEL,
+         "finetuned_model": FINETUNED_ADAPTER,
+         "results": {
+             "base": {
+                 "pass@1": float(results['base']['pass@1']),
+                 "passed": results['base']['passed'],
+                 "failed": results['base']['failed'],
+                 "total": results['base']['total']
+             },
+             "finetuned": {
+                 "pass@1": float(results['finetuned']['pass@1']),
+                 "passed": results['finetuned']['passed'],
+                 "failed": results['finetuned']['failed'],
+                 "total": results['finetuned']['total']
+             },
+             "improvement": float(improvement)
+         },
+         "samples": {
+             "base": base_samples[:5],  # First 5 samples for inspection
+             "finetuned": ft_samples[:5]
+         }
+     }
+
+     # Save locally
+     with open("eval_results_humaneval.json", "w") as f:
+         json.dump(output, f, indent=2)
+     print("\nResults saved to eval_results_humaneval.json")
+
+     # Upload results to model card
+     try:
+         api = HfApi()
+         api.upload_file(
+             path_or_fileobj="eval_results_humaneval.json",
+             path_in_repo="eval_results_humaneval.json",
+             repo_id=OUTPUT_REPO,
+             repo_type="model",
+         )
+         print(f"Results uploaded to {OUTPUT_REPO}")
+     except Exception as e:
+         print(f"Could not upload results: {e}")
+
+     print("\n" + "=" * 60)
+     print("EVALUATION COMPLETE")
+     print("=" * 60)
+
+ if __name__ == "__main__":
+     main()
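
Note on the metric: evaluate_samples above counts a problem as passed when the completion compiles and defines the expected entry-point function; it never executes the benchmark's unit tests, so the printed pass@1 is an optimistic proxy rather than a true HumanEval score. A stricter per-sample check could run the dataset's own assertions; a minimal sketch follows, assuming each row exposes a `test` string that defines check(candidate) and an `entry_point` name, as in the original HumanEval release (these field names are an assumption, not confirmed by this commit). For real runs, the evalplus package already listed in the dependencies provides sandboxed, timeout-protected evaluation and is the safer choice.

def run_unit_tests(problem, completion):
    """Hedged sketch: return True only if the candidate passes the problem's own test suite.
    Assumes problem["test"] defines check(candidate) and problem["entry_point"] names the
    function under test; executing model output like this still needs sandboxing and timeouts."""
    env = {}
    try:
        exec(problem["prompt"] + completion, env)   # define the candidate solution
        exec(problem["test"], env)                  # define check(candidate) from the dataset
        env["check"](env[problem["entry_point"]])   # assertion-based tests; raises on failure
        return True
    except Exception:
        return False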