| """ |
| MBPP Evaluation: Base Devstral vs Fine-tuned Alizee-Coder |
| Runs on HF Jobs with GPU support |
| |
| VERSION: 3.0 - Proper code extraction for both base and fine-tuned models |
| FIXED: |
| - Extract code from ```python blocks for base model (handles chat-like responses) |
| - Function renaming before test execution for both models |
| """ |
|
|
| import os |
| import re |
| import json |
| import torch |
| from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig |
| from peft import PeftModel |
| from datasets import load_dataset |
| from tqdm import tqdm |
| from huggingface_hub import HfApi |
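# Assumed runtime environment (not pinned by the original script): a CUDA GPU and
# recent versions of torch, transformers, peft, bitsandbytes, accelerate, datasets,
# tqdm and huggingface_hub, e.g.:
#   pip install torch transformers peft bitsandbytes accelerate datasets tqdm huggingface_hub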


print("=" * 60)
print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
print("Benchmark: MBPP (Mostly Basic Python Problems)")
print("VERSION: Fixed function name extraction")
print("=" * 60)


# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
TEMPERATURE = 0.1
MAX_NEW_TOKENS = 512


# GPU check
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")


# Quantization config: 4-bit NF4 to reduce GPU memory usage
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
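# Rough sizing note (assumption, not measured here): Devstral-Small has roughly 24B
# parameters, so 4-bit NF4 weights come to roughly 12-14 GB; the KV cache and
# activations add to that at inference time.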


def load_mbpp():
    """Load MBPP dataset"""
    print("\nLoading MBPP dataset...")
    dataset = load_dataset("google-research-datasets/mbpp", "sanitized", split="test")
    print(f"Loaded {len(dataset)} problems")
    return dataset
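# For reference, a record in the sanitized MBPP test split has roughly these fields
# (field names per the dataset card; values illustrative):
#   {"task_id": 2, "prompt": "Write a function to ...", "code": "...",
#    "test_list": ["assert some_func(...) == ..."], "test_imports": [...]}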


def load_model(model_name, adapter_name=None):
    """Load model with optional LoRA adapter"""
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )

    if adapter_name:
        print("Loading LoRA adapter...")
        # Note: merging a LoRA into 4-bit quantized weights needs a recent peft version
        # and may introduce small rounding differences.
        model = PeftModel.from_pretrained(model, adapter_name)
        model = model.merge_and_unload()
        print("Adapter merged")

    model.eval()
    return model, tokenizer
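# Called twice in main() below:
#   base_model, base_tokenizer = load_model(BASE_MODEL)                 # base only
#   ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)  # base + merged LoRA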


def extract_function_name(test_list):
    """Extract expected function name from test cases"""
    if not test_list:
        return None

    # Look at the first test case, e.g. "assert func_name(...) == ..."
    test = test_list[0]

    # Try an assert-style call first, then a bare call at the start of the line
    patterns = [
        r'assert\s+(\w+)\s*\(',
        r'^\s*(\w+)\s*\(',
    ]

    for pattern in patterns:
        match = re.search(pattern, test)
        if match:
            func_name = match.group(1)
            # Ignore keywords/builtins that can precede the real call
            if func_name not in ['assert', 'print', 'len', 'str', 'int', 'float', 'list', 'dict', 'set', 'tuple']:
                return func_name

    return None
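# Illustrative behaviour (hypothetical test strings, not taken from the dataset):
#   extract_function_name(["assert add_two(1, 2) == 3"])  -> "add_two"
#   extract_function_name([])                              -> None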


def extract_python_code(text):
    """Extract Python code from model output"""
    # Prefer the last fenced ```python block
    pattern = r'```python\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # Fall back to the last generic fenced block
    pattern = r'```\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # No fences: assume the whole output is code
    return text.strip()
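# Illustrative behaviour (hypothetical model output):
#   extract_python_code("Sure!\n```python\ndef f(x):\n    return x\n```")
#   -> "def f(x):\n    return x"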


def generate_completion_base(model, tokenizer, prompt, func_name=None):
    """Generate code completion for BASE model (handles both pure completion and chat-like responses)"""
    # Plain completion-style prompt: the base model is expected to continue the code
    if func_name:
        code_prompt = f"# Task: {prompt}\n# Write a Python function named {func_name}\n\n"
    else:
        code_prompt = f"# Task: {prompt}\n\n"

    inputs = tokenizer(code_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)

    # Strip fenced blocks in case the base model answered chat-style
    code = extract_python_code(completion)

    # If the result does not start with a function, grab the first def block from the raw completion
    if not code.startswith("def "):
        match = re.search(r'(def\s+\w+\s*\([^)]*\).*?)(?=\ndef |\nclass |\n```|\Z)', completion, re.DOTALL)
        if match:
            code = match.group(1).strip()

    # Truncate at the next top-level definition or obvious boilerplate
    stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
    for stop in stop_tokens:
        if stop in code:
            code = code[:code.index(stop)]

    return code


def generate_completion_finetuned(model, tokenizer, prompt, func_name=None):
    """Generate code completion for FINE-TUNED model (Instruct format)"""
    # Mistral-style [INST] prompt; assumes the adapter was trained on this format
    if func_name:
        instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}\n\nIMPORTANT: The function MUST be named `{func_name}`.\n[/INST]"
    else:
        instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{prompt}\n[/INST]"

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,  # reasoning + code needs more room
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)

    # Rename the generated function if it does not match the expected name
    if func_name and code:
        match = re.search(r'def\s+(\w+)\s*\(', code)
        if match and match.group(1) != func_name:
            code = re.sub(r'def\s+' + re.escape(match.group(1)) + r'\s*\(', f'def {func_name}(', code)

    return code
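# A possible alternative (assumption: the adapter was trained with the tokenizer's own
# chat template). Building the prompt via apply_chat_template avoids the hand-written
# "<s>[INST]" literal, which may duplicate the BOS token the tokenizer already inserts:
#   messages = [{"role": "user", "content": prompt}]
#   text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)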


def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
    """Evaluate model on MBPP"""
    print(f"\nEvaluating {model_name}...")
    samples = []

    for i, problem in enumerate(tqdm(dataset, desc=f"Generating ({model_name})")):
        task_id = problem.get("task_id", i)
        prompt = problem["prompt"]
        test_list = problem.get("test_list", [])

        # The expected function name is encoded in the test cases
        func_name = extract_function_name(test_list)

        try:
            if is_finetuned:
                completion = generate_completion_finetuned(model, tokenizer, prompt, func_name)
            else:
                completion = generate_completion_base(model, tokenizer, prompt, func_name)

            samples.append({
                "task_id": task_id,
                "prompt": prompt[:200],
                "completion": completion,
                "test_list": test_list,
                "expected_func": func_name,
                "model": model_name
            })
        except Exception as e:
            print(f"Error on task {task_id}: {e}")
            samples.append({
                "task_id": task_id,
                "prompt": prompt[:200],
                "completion": "# Error during generation",
                "test_list": test_list,
                "expected_func": func_name,
                "model": model_name
            })

    return samples


def run_tests(code, test_list):
    """Run test cases on generated code with automatic function renaming"""
    try:
        expected_func = extract_function_name(test_list)

        # Rename the generated function to the name the tests expect
        if expected_func and code:
            match = re.search(r'def\s+(\w+)\s*\(', code)
            if match:
                actual_func = match.group(1)
                if actual_func != expected_func:
                    code = re.sub(r'\b' + re.escape(actual_func) + r'\b', expected_func, code)

        # Execute the candidate solution (model-generated code runs unsandboxed in this process)
        exec_globals = {}
        exec(code, exec_globals)

        # Run every assertion; any failure or exception means the task is not solved
        for test in test_list:
            try:
                exec(test, exec_globals)
            except AssertionError:
                return False
            except Exception:
                return False
        return True
    except Exception:
        return False


def evaluate_samples(samples):
    """Evaluate samples by running test cases"""
    results = {"passed": 0, "failed": 0, "error": 0}
    detailed = []

    for sample in samples:
        task_id = sample["task_id"]
        code = sample["completion"]
        test_list = sample.get("test_list", [])

        if not test_list:
            results["error"] += 1
            detailed.append({"task_id": task_id, "status": "no_tests"})
            continue

        if run_tests(code, test_list):
            results["passed"] += 1
            detailed.append({"task_id": task_id, "status": "passed"})
        else:
            results["failed"] += 1
            detailed.append({"task_id": task_id, "status": "failed"})

    total = results["passed"] + results["failed"]
    pass_rate = results["passed"] / total if total > 0 else 0

    return {
        "pass@1": pass_rate,
        "passed": results["passed"],
        "failed": results["failed"],
        "error": results["error"],
        "total": total,
        "detailed": detailed[:10]
    }
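# The summary returned above looks like (illustrative numbers):
#   {"pass@1": 0.52, "passed": 130, "failed": 120, "error": 0, "total": 250,
#    "detailed": [{"task_id": 2, "status": "passed"}, ...]}
# Note: this is pass@1 estimated from a single sample per task at temperature 0.1, so
# results can vary slightly across runs unless decoding is made greedy.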


def main():
    dataset = load_mbpp()

    results = {}

    # 1) Base model
    print("\n" + "=" * 60)
    print("EVALUATING BASE MODEL")
    print("=" * 60)
    base_model, base_tokenizer = load_model(BASE_MODEL)
    base_samples = evaluate_model(base_model, base_tokenizer, dataset, "Devstral-Small-Base", is_finetuned=False)
    results["base"] = evaluate_samples(base_samples)
    print(f"\nBase Model Results: pass@1 = {results['base']['pass@1']*100:.2f}%")

    # Free GPU memory before loading the fine-tuned model
    del base_model
    torch.cuda.empty_cache()

    # 2) Fine-tuned model
    print("\n" + "=" * 60)
    print("EVALUATING FINE-TUNED MODEL")
    print("=" * 60)
    ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
    ft_samples = evaluate_model(ft_model, ft_tokenizer, dataset, "Alizee-Coder-Devstral", is_finetuned=True)
    results["finetuned"] = evaluate_samples(ft_samples)
    print(f"\nFine-tuned Model Results: pass@1 = {results['finetuned']['pass@1']*100:.2f}%")

    # Comparison summary
    print("\n" + "=" * 60)
    print("COMPARISON SUMMARY - MBPP")
    print("=" * 60)
    print(f"\n{'Model':<40} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 70)
    print(f"{'Devstral-Small-2505 (Base)':<40} {results['base']['pass@1']*100:>9.2f}% {results['base']['passed']:>8} {results['base']['failed']:>8}")
    print(f"{'Alizee-Coder-Devstral (Fine-tuned)':<40} {results['finetuned']['pass@1']*100:>9.2f}% {results['finetuned']['passed']:>8} {results['finetuned']['failed']:>8}")

    # Improvement in percentage points
    improvement = (results['finetuned']['pass@1'] - results['base']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement:':<40} {sign}{improvement:>9.2f} pp")

    # Collect everything that goes into the results file
    output = {
        "benchmark": "MBPP",
        "base_model": BASE_MODEL,
        "finetuned_model": FINETUNED_ADAPTER,
        "results": {
            "base": {
                "pass@1": float(results['base']['pass@1']),
                "passed": results['base']['passed'],
                "failed": results['base']['failed'],
                "total": results['base']['total']
            },
            "finetuned": {
                "pass@1": float(results['finetuned']['pass@1']),
                "passed": results['finetuned']['passed'],
                "failed": results['finetuned']['failed'],
                "total": results['finetuned']['total']
            },
            "improvement": float(improvement)
        },
        "samples": {
            "base": base_samples[:5],
            "finetuned": ft_samples[:5]
        }
    }

    # Save locally
    with open("eval_results_mbpp.json", "w") as f:
        json.dump(output, f, indent=2)
    print("\nResults saved to eval_results_mbpp.json")

    # Upload results next to the adapter on the Hub (needs a token with write access)
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_results_mbpp.json",
            path_in_repo="eval_results_mbpp.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"Results uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload results: {e}")

    print("\n" + "=" * 60)
    print("EVALUATION COMPLETE")
    print("=" * 60)


if __name__ == "__main__":
    main()