| | """ |
| | Prompt Comparison Test: Direct vs Reasoning |
| | Tests if "code only" prompt improves fine-tuned model scores on HumanEval subset |
| | """ |
| |
|
import re
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi

| | print("=" * 60) |
| | print("PROMPT COMPARISON TEST") |
| | print("Direct Code vs Reasoning Prompt") |
| | print("=" * 60) |
| |
|
BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
TEMPERATURE = 0.1
MAX_NEW_TOKENS = 512
NUM_SAMPLES = 50

print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")

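# NF4 4-bit quantization with nested (double) quantization shrinks the weights
# so the model fits in a single GPU's memory; bfloat16 compute keeps
# generation numerically stable.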
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

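# HumanEval's test split has 164 problems; we evaluate on the first NUM_SAMPLES.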
def load_dataset_subset():
    print("\nLoading HumanEval...")
    ds = load_dataset("openai/openai_humaneval", split="test")
    ds = ds.select(range(min(NUM_SAMPLES, len(ds))))
    print(f"Using {len(ds)} problems")
    return ds

def load_model():
    print(f"\nLoading {BASE_MODEL} + {FINETUNED_ADAPTER}...")

    tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )

    # Merge the LoRA adapter into the base weights so generation runs
    # without the PEFT wrapper overhead.
    model = PeftModel.from_pretrained(model, FINETUNED_ADAPTER)
    model = model.merge_and_unload()
    model.eval()
    print("Model loaded and merged")
    return model, tokenizer

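# Models often wrap answers in markdown fences or echo the full function
# signature; the two helpers below normalize the output back to a bare body.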
def extract_code(text):
    """Extract Python code from model output."""
    # Prefer explicitly tagged ```python fences, then generic fences,
    # then fall back to the raw text.
    m = re.findall(r'```python\s*(.*?)\s*```', text, re.DOTALL)
    if m:
        return m[-1].strip()

    m = re.findall(r'```\s*(.*?)\s*```', text, re.DOTALL)
    if m:
        return m[-1].strip()
    return text.strip()

def extract_body(code):
    """Return the function body if the model echoed a full definition.

    Only the first "def" line is stripped, so nested helper functions
    inside the body are preserved.
    """
    if code.strip().startswith("def "):
        lines = code.split('\n')
        for i, line in enumerate(lines):
            if line.strip().startswith("def "):
                body = lines[i + 1:]
                if body:
                    return '\n'.join(body)
                break
    return code

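# Both generation paths share the decoding settings; they differ only in the
# instruction wording and the token budget for the reasoning variant.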
def generate_direct(model, tokenizer, prompt):
    """Direct code prompt - no reasoning."""
    # The tokenizer prepends BOS itself; a literal "<s>" in the string would
    # be tokenized into a second BOS token.
    p = f"[INST] Complete this Python function. Output ONLY the code, no explanations:\n\n{prompt}[/INST]"

    inputs = tokenizer(p, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
        )

    raw = tokenizer.decode(out[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_code(raw)
    code = extract_body(code)

    # Trim anything the model appended after the target function.
    for stop in ["\ndef ", "\nclass ", "\nif __name__"]:
        if stop in code:
            code = code[:code.index(stop)]

    return code

def generate_reasoning(model, tokenizer, prompt):
    """Reasoning prompt - original approach."""
    # Same BOS note as above; the token budget is doubled to leave room for
    # the reasoning text before the code block.
    p = f"[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}[/INST]"

    inputs = tokenizer(p, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
        )

    raw = tokenizer.decode(out[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_code(raw)
    code = extract_body(code)

    return code

def check_syntax(code):
    """Cheap pre-filter: does the completion even compile?"""
    try:
        compile(code, '<string>', 'exec')
        return True
    except (SyntaxError, ValueError):
        return False

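# NOTE: evaluation execs model-generated code in-process with no sandbox or
# timeout. For anything beyond a quick local experiment, run each completion
# in a subprocess with a time limit instead.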
def evaluate(samples, dataset):
    """Score completions with the official HumanEval tests (pass@1)."""
    passed = 0
    total = len(samples)
    ds_dict = {p["task_id"]: p for p in dataset}

    for s in samples:
        task_id = s["task_id"]
        completion = s["completion"]
        problem = ds_dict.get(task_id)
        if not problem:
            continue

        full = problem["prompt"] + completion
        if not check_syntax(full):
            continue

        # Append the dataset's test suite and call its check() on the entry
        # point; a sample passes only if every assertion holds.
        entry = problem.get("entry_point", task_id.split("/")[-1])
        test_program = full + "\n" + problem["test"] + f"\ncheck({entry})\n"
        try:
            g = {}
            exec(test_program, g)
            passed += 1
        except Exception:
            pass

    return {"pass@1": passed / total if total > 0 else 0, "passed": passed, "total": total}

def main():
    dataset = load_dataset_subset()
    model, tokenizer = load_model()

    print("\n" + "=" * 60)
    print("TEST 1: DIRECT CODE PROMPT")
    print("=" * 60)
    direct = []
    for p in tqdm(dataset, desc="Direct"):
        try:
            c = generate_direct(model, tokenizer, p["prompt"])
        except Exception:
            c = "# error"
        direct.append({"task_id": p["task_id"], "completion": c})

    r_direct = evaluate(direct, dataset)
    print(f"Direct: {r_direct['pass@1']*100:.1f}% ({r_direct['passed']}/{r_direct['total']})")

    print("\n" + "=" * 60)
    print("TEST 2: REASONING PROMPT")
    print("=" * 60)
    reasoning = []
    for p in tqdm(dataset, desc="Reasoning"):
        try:
            c = generate_reasoning(model, tokenizer, p["prompt"])
        except Exception:
            c = "# error"
        reasoning.append({"task_id": p["task_id"], "completion": c})

    r_reason = evaluate(reasoning, dataset)
    print(f"Reasoning: {r_reason['pass@1']*100:.1f}% ({r_reason['passed']}/{r_reason['total']})")

    print("\n" + "=" * 60)
    print("RESULTS SUMMARY")
    print("=" * 60)
    print(f"\n{'Prompt':<20} {'pass@1':>10}")
    print("-" * 35)
    print(f"{'Direct Code':<20} {r_direct['pass@1']*100:>9.1f}%")
    print(f"{'Reasoning':<20} {r_reason['pass@1']*100:>9.1f}%")

    # Difference in percentage points (positive favors the direct prompt).
    diff = (r_direct['pass@1'] - r_reason['pass@1']) * 100
    print(f"\n{'Improvement:':<20} {'+' if diff >= 0 else ''}{diff:.1f} pts")

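    # Persist the numbers locally, then best-effort upload to the Hub repo.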
    results = {
        "experiment": "Prompt Comparison",
        "samples": NUM_SAMPLES,
        "direct": r_direct,
        "reasoning": r_reason,
        "improvement": diff
    }

    with open("prompt_comparison.json", "w") as f:
        json.dump(results, f, indent=2)

    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="prompt_comparison.json",
            path_in_repo="prompt_comparison.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"\nUploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Upload failed: {e}")

    print("\nDONE")

if __name__ == "__main__":
    main()
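
# Usage sketch (assumes a CUDA GPU, the dependencies below, and HF credentials
# for the upload step; the script filename here is assumed):
#   pip install torch transformers peft datasets bitsandbytes accelerate huggingface_hub tqdm
#   python prompt_comparison.py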