stmasson committed on
Commit fe5a97c · verified · 1 Parent(s): e9ee872

Upload scripts/eval_humaneval_v3_lite.py with huggingface_hub

Files changed (1)
  1. scripts/eval_humaneval_v3_lite.py +336 -0
scripts/eval_humaneval_v3_lite.py ADDED
@@ -0,0 +1,336 @@
# /// script
# dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "tqdm", "huggingface_hub"]
# ///

"""
HumanEval Evaluation v3 LITE: Direct Code Prompt
Reduced dependencies, minimal storage usage
"""

import os
import re
import json
import gc

# Reduce cache usage
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_HOME"] = "/tmp/hf_home"
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi

print("=" * 60)
print("EVALUATION v3 LITE: Direct Code Prompt Test")
print("Benchmark: HumanEval")
print("=" * 60)

# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
TEMPERATURE = 0.1
MAX_NEW_TOKENS = 512

# Check GPU
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

# 4-bit quantization config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

def load_humaneval():
    """Load the HumanEval dataset."""
    print("\nLoading HumanEval dataset...")
    dataset = load_dataset("openai/openai_humaneval", split="test")
    print(f"Loaded {len(dataset)} problems")
    return dataset

def load_model(model_name, adapter_name=None):
    """Load a 4-bit quantized model, optionally merging a LoRA adapter."""
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
    )

    if adapter_name:
        print("Loading LoRA adapter...")
        model = PeftModel.from_pretrained(model, adapter_name)
        model = model.merge_and_unload()
        print("Adapter merged")

    model.eval()

    # Clear caches after loading
    gc.collect()
    torch.cuda.empty_cache()

    return model, tokenizer

def extract_python_code(text):
    """Extract Python code from the model output."""
    # Prefer ```python fenced blocks
    pattern = r'```python\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # Fall back to generic ``` fenced blocks
    pattern = r'```\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    return text.strip()

def generate_completion_direct(model, tokenizer, prompt):
    """Generate code with a DIRECT CODE prompt (no reasoning)."""
    instruct_prompt = f"""<s>[INST] Complete this Python function. Output ONLY the function body code, no explanations:

{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    raw_completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    completion = extract_python_code(raw_completion)

    if completion.strip().startswith("def "):
        # The model re-emitted the full function: keep only the body so it
        # can be appended to the HumanEval prompt.
        lines = completion.split('\n')
        body_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                body_lines.append(line)
        if body_lines:
            completion = '\n'.join(body_lines)
    elif completion == raw_completion.strip():
        # No fenced block was extracted: restore the raw completion to
        # preserve the body's leading indentation.
        completion = raw_completion

    # Truncate at common continuation boundaries
    stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
    for stop in stop_tokens:
        if stop in completion:
            completion = completion[:completion.index(stop)]

    return completion

def generate_completion_reasoning(model, tokenizer, prompt):
    """Generate code with a REASONING prompt (original approach)."""
    instruct_prompt = f"""<s>[INST] Solve this programming problem with detailed reasoning:

Complete the following function:
{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)

    if "def " in code:
        # Strip the function header and return only the body.
        lines = code.split('\n')
        result_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                result_lines.append(line)
        if result_lines:
            return '\n'.join(result_lines)

    return code

def simple_syntax_check(code):
    """Basic syntax validation: does the code compile?"""
    try:
        compile(code, '<string>', 'exec')
        return True
    except SyntaxError:
        return False

def evaluate_samples(samples, dataset):
    """Lite evaluation: a completion counts as "passed" if the completed
    function compiles and defines the expected entry point. The HumanEval
    unit tests are NOT executed, so this pass@1 is an optimistic proxy for
    the official metric."""
    results = {"passed": 0, "failed": 0, "error": 0}

    dataset_dict = {p["task_id"]: p for p in dataset}

    for sample in samples:
        task_id = sample["task_id"]
        completion = sample["completion"]

        problem = dataset_dict.get(task_id)
        if problem is None:
            results["error"] += 1
            continue

        full_code = problem["prompt"] + completion

        if not simple_syntax_check(full_code):
            results["failed"] += 1
            continue

        try:
            # NOTE: executes model-generated code in-process; run in an
            # isolated environment.
            exec_globals = {}
            exec(full_code, exec_globals)
            entry_point = problem.get("entry_point", task_id.split("/")[-1])
            if entry_point in exec_globals:
                results["passed"] += 1
            else:
                results["failed"] += 1
        except Exception:
            results["error"] += 1

    total = len(samples)
    pass_rate = results["passed"] / total if total > 0 else 0

    return {
        "pass@1": pass_rate,
        "passed": results["passed"],
        "failed": results["failed"],
        "error": results["error"],
        "total": total,
    }

def main():
    dataset = load_humaneval()

    # Load model
    print("\n" + "=" * 60)
    print("LOADING FINE-TUNED MODEL")
    print("=" * 60)
    model, tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)

    results = {}

    # Test 1: Direct prompt
    print("\n" + "=" * 60)
    print("TEST 1: DIRECT CODE PROMPT")
    print("=" * 60)
    direct_samples = []
    for problem in tqdm(dataset, desc="Direct Prompt"):
        try:
            completion = generate_completion_direct(model, tokenizer, problem["prompt"])
            direct_samples.append({
                "task_id": problem["task_id"],
                "completion": completion,
            })
        except Exception as e:
            print(f"Generation failed for {problem['task_id']}: {e}")
            direct_samples.append({
                "task_id": problem["task_id"],
                "completion": "# Error",
            })
    results["direct"] = evaluate_samples(direct_samples, dataset)
    print(f"\nDirect Prompt: pass@1 = {results['direct']['pass@1']*100:.2f}%")

    # Test 2: Reasoning prompt
    print("\n" + "=" * 60)
    print("TEST 2: REASONING PROMPT")
    print("=" * 60)
    reasoning_samples = []
    for problem in tqdm(dataset, desc="Reasoning Prompt"):
        try:
            completion = generate_completion_reasoning(model, tokenizer, problem["prompt"])
            reasoning_samples.append({
                "task_id": problem["task_id"],
                "completion": completion,
            })
        except Exception as e:
            print(f"Generation failed for {problem['task_id']}: {e}")
            reasoning_samples.append({
                "task_id": problem["task_id"],
                "completion": "# Error",
            })
    results["reasoning"] = evaluate_samples(reasoning_samples, dataset)
    print(f"\nReasoning Prompt: pass@1 = {results['reasoning']['pass@1']*100:.2f}%")

    # Summary
    print("\n" + "=" * 60)
    print("PROMPT COMPARISON - HumanEval")
    print("=" * 60)
    print(f"\n{'Prompt Type':<25} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 55)
    print(f"{'Direct Code':<25} {results['direct']['pass@1']*100:>9.2f}% {results['direct']['passed']:>8} {results['direct']['failed']:>8}")
    print(f"{'Reasoning':<25} {results['reasoning']['pass@1']*100:>9.2f}% {results['reasoning']['passed']:>8} {results['reasoning']['failed']:>8}")

    improvement = (results['direct']['pass@1'] - results['reasoning']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement:':<25} {sign}{improvement:>9.2f}%")
    print(f"{'Base Model Reference:':<25} {'82.93%':>10}")

    # Save results locally, then upload to the Hub
    output = {
        "benchmark": "HumanEval",
        "experiment": "Prompt Comparison",
        "results": {
            "direct": results["direct"],
            "reasoning": results["reasoning"],
            "improvement": float(improvement),
        },
    }

    with open("eval_prompt_comparison.json", "w") as f:
        json.dump(output, f, indent=2)

    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_prompt_comparison.json",
            path_in_repo="eval_prompt_comparison.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"\nResults uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload: {e}")

    print("\n" + "=" * 60)
    print("EVALUATION COMPLETE")
    print("=" * 60)

if __name__ == "__main__":
    main()
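Note on the lite metric: evaluate_samples only checks that a completion compiles and defines the expected entry point, so the pass@1 it reports is an optimistic proxy rather than the official HumanEval score. As a minimal sketch of what running the real unit tests would involve (not part of the committed script), the snippet below executes each problem's check() function in a subprocess, assuming the openai/openai_humaneval fields prompt, test, and entry_point; the helper name run_humaneval_tests and the 10-second timeout are illustrative choices, and the subprocess gives only crude isolation, not a sandbox.

# Sketch only: assumes the openai/openai_humaneval schema (prompt/test/entry_point).
# Executes untrusted, model-generated code; run inside an isolated environment.
import os
import subprocess
import sys
import tempfile

def run_humaneval_tests(problem: dict, completion: str, timeout: float = 10.0) -> bool:
    """Return True if `completion` passes the problem's check() tests."""
    program = (
        problem["prompt"]
        + completion
        + "\n"
        + problem["test"]
        + "\n"
        + f"check({problem['entry_point']})\n"
    )
    # Write the assembled program to a temp file and run it in a fresh interpreter.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(program)
        path = tmp.name
    try:
        proc = subprocess.run([sys.executable, path], capture_output=True, timeout=timeout)
        return proc.returncode == 0
    except subprocess.TimeoutExpired:
        return False
    finally:
        os.unlink(path)

Swapping a loop over run_humaneval_tests into evaluate_samples in place of the compile-and-define check would turn the lite proxy into the standard pass@1 computation. Separately, since the script declares its dependencies via inline script metadata (the # /// script block), it should be runnable directly with a PEP 723-aware launcher such as uv run scripts/eval_humaneval_v3_lite.py.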