| import uuid |
| import json |
| from typing import List |
| from .llm_client import ask_ollama |
| from .prompts import build_review_prompt |
| from ..models.schemas import Finding, ReviewResponse |
|
|
def make_id() -> str:
    """Return a short 8-character hexadecimal identifier for a finding."""
    # .hex is the dash-free form of the UUID; its first 8 chars match
    # the first 8 chars of str(uuid4()) (the leading hex group).
    return uuid.uuid4().hex[:8]
|
|
def review_single_file(path: str, filename: str, code: str, analyzer_evidence: List[dict], persona: str = "general") -> ReviewResponse:
    """Run an LLM review of a single file and parse the reply into findings.

    Args:
        path: Filesystem path of the file. Currently unused; kept for
            backward compatibility with existing callers.
        filename: Display name recorded on every returned finding.
        code: The source code to review.
        analyzer_evidence: Static-analyzer results injected into the prompt.
        persona: Reviewer persona used when building the prompt.

    Returns:
        A ReviewResponse whose findings are either the parsed LLM output,
        or — if the reply is not valid JSON findings — a single low-severity
        "Raw Review" finding wrapping the raw LLM text (truncated to 3000
        characters).
    """
    prompt = build_review_prompt(filename, code, analyzer_evidence, persona)
    raw = ask_ollama(prompt)

    # Parse into a local list and commit only on full success: previously a
    # mid-loop failure (bad "confidence" or "line_range" on one item) left a
    # mix of partially parsed findings plus the raw fallback in the result.
    parsed: List[Finding] = []
    try:
        arr = json.loads(raw)
        for item in arr:
            line_range = item.get("line_range")
            parsed.append(Finding(
                # `or` also replaces an empty-string id, and avoids
                # generating a UUID eagerly when the item supplies one.
                id=item.get("id") or make_id(),
                title=item.get("title", "Untitled"),
                severity=item.get("severity", "low"),
                file=filename,
                line_range=tuple(line_range) if line_range else None,
                description=item.get("description", ""),
                suggested_fix=item.get("suggested_fix"),
                confidence=int(item.get("confidence", 50)),
                evidence=item.get("evidence"),
            ))
        findings = parsed
    except Exception:
        # Broad on purpose: any malformed LLM output (non-JSON text, wrong
        # shape, bad field types, or raw is None) degrades to a single
        # raw-text finding instead of crashing the review.
        findings = [Finding(
            id=make_id(),
            title="Raw Review",
            severity="low",
            file=filename,
            line_range=None,
            description=(raw or "No response from LLM")[:3000],
            suggested_fix=None,
            confidence=50,
            evidence=None,
        )]

    summary = f"Found {len(findings)} issues (persona={persona})."
    return ReviewResponse(summary=summary, findings=findings)
|
|