stmasson committed
Commit d66cd5b · verified · 1 Parent(s): 023e8b5

Upload scripts/eval_prompt_test.py with huggingface_hub

Files changed (1)
  1. scripts/eval_prompt_test.py +255 -0
scripts/eval_prompt_test.py ADDED
@@ -0,0 +1,255 @@
+ # /// script
+ # dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
+ # ///
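+ # Inline script metadata (PEP 723): tools such as `uv run` can read the block above
+ # and install the listed dependencies before executing this script.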
+
+ """
+ Prompt Comparison Test: Direct vs Reasoning
+ Tests if "code only" prompt improves fine-tuned model scores on HumanEval subset
+ """
+
+ import os
+ import re
+ import json
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import PeftModel
+ from datasets import load_dataset
+ from tqdm import tqdm
+ from huggingface_hub import HfApi
+
+ print("=" * 60)
+ print("PROMPT COMPARISON TEST")
+ print("Direct Code vs Reasoning Prompt")
+ print("=" * 60)
+
+ # Configuration
+ BASE_MODEL = "mistralai/Devstral-Small-2505"
+ FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
+ OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
+ TEMPERATURE = 0.1
+ MAX_NEW_TOKENS = 512
+ NUM_SAMPLES = 50  # Subset for quick test
+
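+ # Note: the evaluation below uses only the first NUM_SAMPLES HumanEval problems,
+ # so scores are not directly comparable to full-benchmark pass@1 numbers.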
+ # GPU
+ print(f"\nGPU available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+     print(f"GPU: {torch.cuda.get_device_name(0)}")
+     print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
+
+ # 4-bit config
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_use_double_quant=True,
+ )
+
+ def load_dataset_subset():
+     print("\nLoading HumanEval...")
+     ds = load_dataset("openai/openai_humaneval", split="test")
+     ds = ds.select(range(min(NUM_SAMPLES, len(ds))))
+     print(f"Using {len(ds)} problems")
+     return ds
+
+ def load_model():
+     print(f"\nLoading {BASE_MODEL} + {FINETUNED_ADAPTER}...")
+
+     tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
+     if tokenizer.pad_token is None:
+         tokenizer.pad_token = tokenizer.eos_token
+
+     model = AutoModelForCausalLM.from_pretrained(
+         BASE_MODEL,
+         quantization_config=bnb_config,
+         device_map="auto",
+         trust_remote_code=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     model = PeftModel.from_pretrained(model, FINETUNED_ADAPTER)
+     model = model.merge_and_unload()
+     model.eval()
+     print("Model loaded and merged")
+     return model, tokenizer
+
+ def extract_code(text):
+     """Extract Python code from output"""
+     # Try ```python blocks
+     m = re.findall(r'```python\s*(.*?)\s*```', text, re.DOTALL)
+     if m:
+         return m[-1].strip()
+     # Try ``` blocks
+     m = re.findall(r'```\s*(.*?)\s*```', text, re.DOTALL)
+     if m:
+         return m[-1].strip()
+     return text.strip()
+
+ def extract_body(code):
+     """Extract function body if full function returned"""
+     if code.strip().startswith("def "):
+         lines = code.split('\n')
+         body = []
+         in_func = False
+         for line in lines:
+             if line.strip().startswith("def "):
+                 in_func = True
+                 continue
+             if in_func:
+                 body.append(line)
+         if body:
+             return '\n'.join(body)
+     return code
+
+ def generate_direct(model, tokenizer, prompt):
+     """Direct code prompt - no reasoning"""
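+     # Note: depending on tokenizer settings, tokenizer() may already prepend a BOS token,
+     # in which case the literal "<s>" below would duplicate it; worth verifying.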
+     p = f"<s>[INST] Complete this Python function. Output ONLY the code, no explanations:\n\n{prompt}[/INST]"
+
+     inputs = tokenizer(p, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
+     with torch.no_grad():
+         out = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS,
+             temperature=TEMPERATURE,
+             do_sample=TEMPERATURE > 0,
+             pad_token_id=tokenizer.pad_token_id,
+         )
+
+     raw = tokenizer.decode(out[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+     code = extract_code(raw)
+     code = extract_body(code)
+
+     # Stop at boundaries
+     for stop in ["\ndef ", "\nclass ", "\nif __name__"]:
+         if stop in code:
+             code = code[:code.index(stop)]
+
+     return code
+
+ def generate_reasoning(model, tokenizer, prompt):
+     """Reasoning prompt - original approach"""
+     p = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}[/INST]"
+
+     inputs = tokenizer(p, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
+     with torch.no_grad():
+         out = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS * 2,
+             temperature=TEMPERATURE,
+             do_sample=TEMPERATURE > 0,
+             pad_token_id=tokenizer.pad_token_id,
+         )
+
+     raw = tokenizer.decode(out[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+     code = extract_code(raw)
+     code = extract_body(code)
+
+     return code
+
+ def check_syntax(code):
+     try:
+         compile(code, '<string>', 'exec')
+         return True
+     except Exception:
+         return False
+
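+ # Lenient scoring: a sample counts as "passed" if prompt + completion compiles and
+ # defines the entry point. The dataset's unit tests are NOT executed, so this
+ # "pass@1" is an optimistic proxy for the official HumanEval metric.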
+ def evaluate(samples, dataset):
+     passed = 0
+     total = len(samples)
+     ds_dict = {p["task_id"]: p for p in dataset}
+
+     for s in samples:
+         task_id = s["task_id"]
+         completion = s["completion"]
+         problem = ds_dict.get(task_id)
+         if not problem:
+             continue
+
+         full = problem["prompt"] + completion
+         if not check_syntax(full):
+             continue
+
+         try:
+             g = {}
+             # exec() runs model-generated code in-process; only use in a throwaway environment
+             exec(full, g)
+             entry = problem.get("entry_point", task_id.split("/")[-1])
+             if entry in g:
+                 passed += 1
+         except Exception:
+             pass
+
+     return {"pass@1": passed / total if total > 0 else 0, "passed": passed, "total": total}
+
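+ # A stricter check (sketch only, not wired into main()) would also run the dataset's
+ # unit tests, roughly:
+ #     env = {}
+ #     exec(full + "\n" + problem["test"], env)
+ #     env["check"](env[problem["entry_point"]])
+ # HumanEval's "test" field defines check(candidate); executing generated code this way
+ # is best done in a subprocess or sandbox with a timeout.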
+ def main():
+     dataset = load_dataset_subset()
+     model, tokenizer = load_model()
+
+     # Test 1: Direct prompt
+     print("\n" + "=" * 60)
+     print("TEST 1: DIRECT CODE PROMPT")
+     print("=" * 60)
+     direct = []
+     for p in tqdm(dataset, desc="Direct"):
+         try:
+             c = generate_direct(model, tokenizer, p["prompt"])
+         except Exception:
+             c = "# error"
+         direct.append({"task_id": p["task_id"], "completion": c})
+
+     r_direct = evaluate(direct, dataset)
+     print(f"Direct: {r_direct['pass@1']*100:.1f}% ({r_direct['passed']}/{r_direct['total']})")
+
+     # Test 2: Reasoning prompt
+     print("\n" + "=" * 60)
+     print("TEST 2: REASONING PROMPT")
+     print("=" * 60)
+     reasoning = []
+     for p in tqdm(dataset, desc="Reasoning"):
+         try:
+             c = generate_reasoning(model, tokenizer, p["prompt"])
+         except Exception:
+             c = "# error"
+         reasoning.append({"task_id": p["task_id"], "completion": c})
+
+     r_reason = evaluate(reasoning, dataset)
+     print(f"Reasoning: {r_reason['pass@1']*100:.1f}% ({r_reason['passed']}/{r_reason['total']})")
+
+     # Summary
+     print("\n" + "=" * 60)
+     print("RESULTS SUMMARY")
+     print("=" * 60)
+     print(f"\n{'Prompt':<20} {'pass@1':>10}")
+     print("-" * 35)
+     print(f"{'Direct Code':<20} {r_direct['pass@1']*100:>9.1f}%")
+     print(f"{'Reasoning':<20} {r_reason['pass@1']*100:>9.1f}%")
+
+     diff = (r_direct['pass@1'] - r_reason['pass@1']) * 100  # difference in percentage points
+     print(f"\n{'Improvement:':<20} {'+' if diff >= 0 else ''}{diff:.1f}%")
+
+     # Save
+     results = {
+         "experiment": "Prompt Comparison",
+         "samples": NUM_SAMPLES,
+         "direct": r_direct,
+         "reasoning": r_reason,
+         "improvement": diff
+     }
+
+     with open("prompt_comparison.json", "w") as f:
+         json.dump(results, f, indent=2)
+
+     try:
+         api = HfApi()
+         api.upload_file(
+             path_or_fileobj="prompt_comparison.json",
+             path_in_repo="prompt_comparison.json",
+             repo_id=OUTPUT_REPO,
+             repo_type="model",
+         )
+         print(f"\nUploaded to {OUTPUT_REPO}")
+     except Exception as e:
+         print(f"Upload failed: {e}")
+
+     print("\nDONE")
+
+ if __name__ == "__main__":
+     main()