| from __future__ import annotations |
|
|
| |
|
|
| import argparse |
| import csv |
| import re |
| import subprocess |
| from pathlib import Path |
| from typing import Dict, Optional |
|
|
| import sys |
|
|
| try: |
| from scripts.defextra_markers import ( |
| DocIndex, |
| HASH_VERSION, |
| TokenIndex, |
| build_tei_index, |
| doi_suffix, |
| extract_ids_from_tei, |
| extract_text_from_pdf, |
| hash_token_sequence, |
| normalize_arxiv, |
| normalize_doi, |
| normalize_paper_id, |
| strip_citations, |
| tokenize_text, |
| ) |
| from scripts.defextra_pdf_aliases import candidate_pdf_aliases |
| except ModuleNotFoundError as exc: |
| if exc.name != "scripts": |
| raise |
| PROJECT_ROOT = Path(__file__).resolve().parent.parent |
| if str(PROJECT_ROOT) not in sys.path: |
| sys.path.insert(0, str(PROJECT_ROOT)) |
| from scripts.defextra_markers import ( |
| DocIndex, |
| HASH_VERSION, |
| TokenIndex, |
| build_tei_index, |
| doi_suffix, |
| extract_ids_from_tei, |
| extract_text_from_pdf, |
| hash_token_sequence, |
| normalize_arxiv, |
| normalize_doi, |
| normalize_paper_id, |
| strip_citations, |
| tokenize_text, |
| ) |
| from scripts.defextra_pdf_aliases import candidate_pdf_aliases |
|
|
# Punctuation that may trail a matched span and should be kept with it.
TRAILING_PUNCT = set(".,;:?!)]}\"'")
# Sentence-ending punctuation characters.
END_PUNCT = {".", ",", ";", ":", "?", "!"}
# Closing quotes/brackets that may appear after sentence-ending punctuation.
TRAILING_QUOTES = {"'", '"', "”", "’", ")", "]"}
# Numeric bracket citations, e.g. "[3]" or "[1, 2-4]".
CITATION_BRACKET_RE = re.compile(r"\[[0-9][0-9,;\s\-–]*\]")
# Parenthetical citations containing a 4-digit year, e.g. "(Smith, 2020)".
CITATION_PAREN_RE = re.compile(r"\([^)]*\d{4}[^)]*\)")
|
|
|
|
def _extend_span_end(doc_text: str, end: int) -> int:
    """Push *end* forward over trailing punctuation.

    Also absorbs one whitespace-separated punctuation run (e.g. `word ."`),
    returning the new exclusive end offset.
    """
    if end < 0:
        return end
    size = len(doc_text)

    def _eat_punct(pos: int) -> int:
        # Advance over a contiguous run of trailing punctuation.
        while pos < size and doc_text[pos] in TRAILING_PUNCT:
            pos += 1
        return pos

    end = _eat_punct(end)
    lookahead = end
    while lookahead < size and doc_text[lookahead].isspace():
        lookahead += 1
    if lookahead < size and doc_text[lookahead] in TRAILING_PUNCT:
        end = _eat_punct(lookahead + 1)
    return end
|
|
|
|
def _extract_with_trailing_punct(
    doc_text: str,
    start: Optional[int],
    end: Optional[int],
) -> str:
    """Slice doc_text[start:end], extending the end over trailing punctuation.

    Returns "" for missing or out-of-range offsets.
    """
    if start is None or end is None:
        return ""
    out_of_range = start < 0 or end > len(doc_text)
    if out_of_range or start >= end:
        return ""
    return doc_text[start:_extend_span_end(doc_text, end)]
|
|
|
|
def _token_count(text: str) -> int:
    """Count tokens in *text* using the shared tokenizer ("" counts as 0)."""
    tokens = tokenize_text(text or "", return_spans=False)[0]
    return len(tokens)
|
|
|
|
def _row_flag(row: dict, key: str, default: bool = False) -> bool:
    """Read a CSV boolean cell; empty or missing values fall back to *default*."""
    raw = row.get(key) or ""
    normalized = raw.strip().lower()
    return normalized == "true" if normalized else default
|
|
|
|
def _trim_pattern(
    text: str,
    expected: int,
    pattern: re.Pattern[str],
) -> str:
    """Greedily delete *pattern* matches that move the token count toward *expected*.

    Repeats until no single removal improves the distance to *expected*, or
    until the text drops to/below the expected token count.
    """
    if not text or expected <= 0:
        return text
    while True:
        best_text = text
        best_diff = abs(_token_count(text) - expected)
        found_better = False
        for hit in pattern.finditer(text):
            trimmed = (text[: hit.start()] + " " + text[hit.end() :]).strip()
            candidate_diff = abs(_token_count(trimmed) - expected)
            if candidate_diff < best_diff:
                best_text, best_diff = trimmed, candidate_diff
                found_better = True
        if not found_better:
            return text
        text = best_text
        if _token_count(text) <= expected:
            return text
|
|
|
|
def _trim_citations(text: str, expected: int) -> str:
    """Drop citation markers until the text is within *expected* tokens.

    Bracket citations are removed first; parenthetical author-year citations
    only if the text is still over budget.
    """
    if not text or expected <= 0:
        return text
    if _token_count(text) <= expected:
        return text
    for pattern in (CITATION_BRACKET_RE, CITATION_PAREN_RE):
        text = _trim_pattern(text, expected, pattern)
        if _token_count(text) <= expected:
            break
    return text
|
|
|
|
def _trim_to_token_count(text: str, expected: int) -> str:
    """Hard-truncate *text* after its first *expected* tokens.

    Trailing punctuation after the cut-off token is kept.
    """
    if not text or expected <= 0:
        return text
    _, spans = tokenize_text(text, return_spans=True)
    if not spans or len(spans) <= expected:
        return text
    cut = _extend_span_end(text, spans[expected - 1][1])
    return text[:cut].rstrip()
|
|
|
|
def _cleanup_spacing(text: str) -> str:
    """Normalize PDF-extraction artifacts in *text*.

    ASCII-folds curly quotes, unicode dashes, and ligatures; repairs spaces
    lost around sentence punctuation and brackets; spaces out commas inside
    numeric bracket citations; collapses runs of spaces/tabs.
    """
    if not text:
        return text
    value = text
    # ASCII-fold curly quotes and apostrophes.
    value = value.replace("“", '"').replace("”", '"')
    value = value.replace("’", "'").replace("‘", "'")

    def _dash_repl(match: re.Match[str]) -> str:
        # A run of 2+ unicode dashes becomes "--", a single one "-".
        run = match.group(0)
        return "--" if len(run) >= 2 else "-"

    value = re.sub(r"[\u2010-\u2015\u2212\u2043]+", _dash_repl, value)
    # Expand typographic ligatures left over from PDF text extraction.
    value = value.replace("\ufb00", "ff")
    value = value.replace("\ufb01", "fi")
    value = value.replace("\ufb02", "fl")
    value = value.replace("\ufb03", "ffi")
    value = value.replace("\ufb04", "ffl")
    value = value.replace("…", "...")
    value = re.sub(r"-{3,}", "--", value)
    # Reinsert spaces lost after sentence-ending punctuation / commas / colons.
    value = re.sub(r"([a-z0-9])([.!?])(?=[A-Z])", r"\1\2 ", value)
    value = re.sub(r"([A-Za-z]),(?=[A-Za-z])", r"\1, ", value)
    value = re.sub(r"([A-Za-z0-9])([;:])(?=[A-Za-z])", r"\1\2 ", value)
    # NOTE(review): the digit-letter split below re-separates the "5s" join
    # above — confirm the combination is intended.
    value = re.sub(r"(\d)\s+(s)\b", r"\1\2", value)
    value = re.sub(r"([0-9])([A-Za-z])", r"\1 \2", value)
    value = re.sub(r"[ \t]+([,.;:!?])", r"\1", value)
    # Remove padding just inside parentheses/brackets, add a space after them.
    value = re.sub(r"\(\s+", "(", value)
    value = re.sub(r"\s+\)", ")", value)
    value = re.sub(r"\[\s+", "[", value)
    value = re.sub(r"\s+\]", "]", value)
    value = re.sub(r"\)(?=[A-Za-z0-9])", ") ", value)
    value = re.sub(r"\](?=[A-Za-z0-9])", "] ", value)

    def _space_citation_commas(match: re.Match[str]) -> str:
        inner = match.group(1)
        # BUG FIX: the lookahead was r",(?!\\s)" — a literal backslash + "s",
        # which never matches real whitespace, so commas already followed by a
        # space were doubled and only coincidentally repaired by the final
        # whitespace collapse.  Match actual whitespace instead.
        inner = re.sub(r",(?!\s)", ", ", inner)
        return f"[{inner}]"

    value = re.sub(r"\[([0-9,;\s\-–]+)\]", _space_citation_commas, value)
    value = re.sub(r"[ \t]{2,}", " ", value)
    return value
|
|
|
|
def _normalize_whitespace(
    text: str,
    preserve_linebreaks: bool,
) -> str:
    """Clean spacing, then collapse whitespace (optionally keeping newlines)."""
    if not text:
        return text
    cleaned = _cleanup_spacing(text)
    if preserve_linebreaks:
        # Only collapse horizontal whitespace so newlines survive.
        return re.sub(r"[ \t]{2,}", " ", cleaned)
    return re.sub(r"\s+", " ", cleaned).strip()
|
|
|
|
def _normalize_hyphenation(
    text: str,
    preserve_hyphenation: bool,
) -> str:
    """Rejoin words split across lines as "exam- ple" unless preservation is on."""
    if not text or preserve_hyphenation:
        return text
    return re.sub(r"([A-Za-z])-\s+([A-Za-z])", r"\1\2", text)
|
|
|
|
def _postprocess_text(
    text: str,
    expected_tokens: int,
    preserve_linebreaks: bool,
    preserve_hyphenation: bool,
    keep_bracket_citations: bool = True,
    keep_paren_citations: bool = True,
    split_letter_digit: bool = True,
) -> str:
    """Run the full excerpt clean-up pipeline over *text*.

    Optionally strips citation kinds, trims to the expected token budget,
    then normalizes spacing, hyphenation, and whitespace.
    """
    value = text
    removals = (
        (keep_bracket_citations, CITATION_BRACKET_RE),
        (keep_paren_citations, CITATION_PAREN_RE),
    )
    for keep, pattern in removals:
        if not keep:
            value = pattern.sub(" ", value)
    value = _trim_citations(value, expected_tokens)
    value = _trim_to_token_count(value, expected_tokens)
    value = _cleanup_spacing(value)
    if split_letter_digit:
        value = re.sub(r"([A-Za-z])([0-9])", r"\1 \2", value)
    value = _normalize_hyphenation(value, preserve_hyphenation)
    return _normalize_whitespace(value, preserve_linebreaks)
|
|
|
|
def _ensure_trailing_punct(text: str, end_punct: str) -> str:
    """Force *text* to end with *end_punct* (before any trailing quotes).

    With a falsy *end_punct*, any final sentence punctuation is removed
    instead, and unbalanced trailing ")"/"]" closers are dropped.

    The original implementation duplicated the quote-peeling logic and,
    when *end_punct* was empty and base/suffix was empty, fell through into
    the add-punctuation path (correct only because end_punct was "").  This
    version computes base/suffix once and branches explicitly; behavior is
    unchanged.
    """
    stripped = text.rstrip()
    if not stripped:
        return text
    # Peel trailing quote/bracket characters into `suffix`.
    i = len(stripped) - 1
    suffix = ""
    while i >= 0 and stripped[i] in TRAILING_QUOTES:
        suffix = stripped[i] + suffix
        i -= 1
    base = stripped[: i + 1]
    if not end_punct:
        # Removal mode: strip one final sentence punctuation character.
        if base and base[-1] in END_PUNCT:
            base = base[:-1]
        if base and suffix:
            # Drop closers that have no matching opener in the base text.
            if ")" in suffix and "(" not in base:
                suffix = suffix.replace(")", "")
            if "]" in suffix and "[" not in base:
                suffix = suffix.replace("]", "")
        return f"{base}{suffix}"
    # Replacement/append mode: swap or add the requested end punctuation.
    if base and base[-1] in END_PUNCT:
        base = base[:-1] + end_punct
    else:
        base = f"{base}{end_punct}"
    return f"{base}{suffix}"
|
|
|
|
def _find_pdf_hash_span(
    row: dict,
    pdf_token_index: Optional[TokenIndex],
    prefix: str,
) -> Optional[tuple[int, int]]:
    """Locate the row's hashed span inside the PDF token index, if any."""
    if pdf_token_index is None:
        return None
    spec = _select_hash_specs(row, prefix)
    if spec is None:
        return None
    return pdf_token_index.find_span_by_hash(*spec) or None
|
|
|
|
def _bool_flag(value: str) -> bool:
    """True only for a case-insensitive "true" (after stripping whitespace)."""
    text = value if value else ""
    return text.strip().lower() == "true"
|
|
|
|
def _strip_flags(row: dict, prefix: str) -> tuple[bool, bool]:
    """Strip a citation kind only when the row does NOT flag it as present."""
    has_bracket = _bool_flag(row.get(f"{prefix}_has_bracket_citation", ""))
    has_paren = _bool_flag(row.get(f"{prefix}_has_paren_citation", ""))
    return not has_bracket, not has_paren
|
|
|
|
def _span_matches_hash(row: dict, text: str, prefix: str) -> bool:
    """Check whether *text* reproduces the row's stored token-hash markers.

    Citations are stripped the same way the markers were computed, then both
    the 64-bit hash and the SHA-256 must match.
    """
    if not text:
        return False
    expected_hash = row.get(f"{prefix}_hash64") or ""
    expected_sha = row.get(f"{prefix}_sha256") or ""
    if not expected_hash or not expected_sha:
        return False
    strip_brackets, strip_parens = _strip_flags(row, prefix)
    check_text = strip_citations(
        text,
        strip_brackets=strip_brackets,
        strip_parens=strip_parens,
    )
    # Spans are never used here; skip computing them (matches _token_count).
    tokens, _ = tokenize_text(check_text, return_spans=False)
    hash64, sha, _ = hash_token_sequence(tokens)
    return str(hash64) == str(expected_hash) and sha == expected_sha
|
|
|
|
def _candidate_ids(paper_id: str, doi: str, arxiv: str) -> list[str]:
    """Ordered, de-duplicated lookup keys for a paper (id, DOI, arXiv, aliases)."""
    raw: list[str] = [paper_id, normalize_paper_id(paper_id)]
    if doi:
        raw.extend((doi, doi_suffix(doi)))
    if arxiv:
        raw.extend((arxiv, normalize_arxiv(arxiv)))
    raw.extend(candidate_pdf_aliases(paper_id, doi, arxiv))
    seen: set[str] = set()
    ordered: list[str] = []
    for item in raw:
        key = (item or "").strip()
        if key and key not in seen:
            seen.add(key)
            ordered.append(key)
    return ordered
|
|
|
|
def _normalize_title(title: str) -> str:
    """Lower-case and collapse whitespace for title-based matching."""
    parts = title.lower().split()
    return " ".join(parts)
|
|
|
|
def _build_meta_index(
    tei_index: Dict[str, Path],
) -> tuple[Dict[str, Path], Dict[str, Path]]:
    """Index TEI files by the DOI/arXiv ids declared inside them.

    First occurrence wins for each key (setdefault semantics).
    """
    by_doi: Dict[str, Path] = {}
    by_arxiv: Dict[str, Path] = {}
    for tei_path in tei_index.values():
        doi, arxiv = extract_ids_from_tei(tei_path)
        if doi:
            by_doi.setdefault(normalize_doi(doi), tei_path)
            by_doi.setdefault(doi_suffix(doi), tei_path)
        if arxiv:
            by_arxiv.setdefault(normalize_arxiv(arxiv), tei_path)
    return by_doi, by_arxiv
|
|
|
|
def _resolve_tei_path(
    paper_id: str,
    doi: str,
    arxiv: str,
    tei_index: Dict[str, Path],
    doi_index: Dict[str, Path],
    arxiv_index: Dict[str, Path],
) -> Optional[Path]:
    """Resolve a row to its TEI file via id candidates, then DOI, then arXiv."""
    for candidate in _candidate_ids(paper_id, doi, arxiv):
        hit = tei_index.get(candidate)
        if hit is not None:
            return hit
        if candidate.startswith("paper_"):
            # Also try the candidate without the "paper_" prefix.
            hit = tei_index.get(candidate[len("paper_") :])
            if hit is not None:
                return hit
    if doi:
        for doi_key in (normalize_doi(doi), doi_suffix(doi)):
            if doi_key in doi_index:
                return doi_index[doi_key]
    if arxiv:
        hit = arxiv_index.get(normalize_arxiv(arxiv))
        if hit is not None:
            return hit
    return None
|
|
|
|
def _tei_stem(path: Path) -> str:
    """Filename with the ".grobid.tei.xml" suffix removed (if present)."""
    suffix = ".grobid.tei.xml"
    name = path.name
    if name.endswith(suffix):
        return name[: -len(suffix)]
    return name
|
|
|
|
def _build_pdf_index(pdf_dir: Path) -> Dict[str, Path]:
    """Map many alias spellings of each PDF's filename stem to its path.

    Aliases cover "paper_" prefixes, "_fixed"/"-fixed" suffixes, version
    suffixes ("name" + "v2"), arXiv-id forms ("2101.12345v2"), and embedded
    "S"-prefixed digit runs (presumably Elsevier PII ids — confirm).  Most
    keys use setdefault (first PDF found wins); "_fixed" variants assign
    directly so they overwrite earlier entries.
    """
    index: Dict[str, Path] = {}
    if not pdf_dir.exists():
        return index
    # Generic "namev3" -> "name"; specific arXiv "2101.12345v2" -> "2101.12345".
    version_re = re.compile(r"^(?P<base>.+?)(v\d+)$", re.IGNORECASE)
    arxiv_re = re.compile(r"^(?P<base>\d{4}\.\d{4,5})v\d+$", re.IGNORECASE)
    # "S" followed by 8+ digits anywhere in the stem.
    pii_re = re.compile(r"(S\d{8,})", re.IGNORECASE)
    for suffix in ("*.pdf", "*.PDF"):
        for path in pdf_dir.rglob(suffix):
            stem = path.stem
            index.setdefault(stem, path)
            index.setdefault(normalize_paper_id(stem), path)
            index.setdefault(f"paper_{stem}", path)
            if stem.startswith("paper_"):
                stripped = stem[len("paper_") :]
                if stripped:
                    index.setdefault(stripped, path)
                    index.setdefault(normalize_paper_id(stripped), path)
            if stem.endswith("_fixed") or stem.endswith("-fixed"):
                # A "fixed" PDF supersedes the original: assign, not setdefault.
                base = (
                    stem[: -len("_fixed")]
                    if stem.endswith("_fixed")
                    else stem[: -len("-fixed")]
                )
                if base:
                    index[base] = path
                    index[normalize_paper_id(base)] = path
                    index[f"paper_{base}"] = path
                    if base.startswith("paper_"):
                        stripped_base = base[len("paper_") :]
                        if stripped_base:
                            index[stripped_base] = path
                            index[normalize_paper_id(stripped_base)] = path
            match = arxiv_re.match(stem)
            if match:
                base = match.group("base")
                index.setdefault(base, path)
                index.setdefault(normalize_paper_id(base), path)
            match = version_re.match(stem)
            if match:
                base = match.group("base")
                index.setdefault(base, path)
                index.setdefault(normalize_paper_id(base), path)
            pii_match = pii_re.search(stem)
            if pii_match:
                pii = pii_match.group(1)
                index.setdefault(pii, path)
                index.setdefault(normalize_paper_id(pii), path)
    return index
|
|
|
|
def _select_hash_specs(
    row: dict,
    prefix: str,
) -> Optional[tuple[int, int, str]]:
    """Return (token_count, hash64, sha256) for *prefix*, or None.

    Rows recorded under a different hash version are rejected outright.
    """
    version = (row.get("hash_version") or "").strip()
    if version and version != HASH_VERSION:
        return None
    fields = (
        row.get(f"{prefix}_token_count") or "",
        row.get(f"{prefix}_hash64") or "",
        row.get(f"{prefix}_sha256") or "",
    )
    if not all(fields):
        return None
    count, hash64, sha = fields
    try:
        return int(count), int(hash64), sha
    except ValueError:
        return None
|
|
|
|
def _select_anchor_specs(
    row: dict,
    prefix: str,
    position: str,
) -> Optional[tuple[int, int, str]]:
    """Return the (count, hash64, sha256) anchor spec for head/mid/tail."""
    version = (row.get("hash_version") or "").strip()
    if version and version != HASH_VERSION:
        return None
    key = f"{prefix}_{position}"
    count = row.get(f"{key}_token_count") or ""
    hash64 = row.get(f"{key}_hash64") or ""
    sha = row.get(f"{key}_sha256") or ""
    if not (count and hash64 and sha):
        return None
    try:
        return int(count), int(hash64), sha
    except ValueError:
        return None
|
|
|
|
def _select_anchor_spec_list(
    row: dict,
    prefix: str,
    position: str,
) -> list[tuple[int, int, str]]:
    """Primary anchor spec plus an optional "_alt_" fallback spec."""
    specs: list[tuple[int, int, str]] = []
    primary = _select_anchor_specs(row, prefix, position)
    if primary is not None:
        specs.append(primary)
    version = (row.get("hash_version") or "").strip()
    if version and version != HASH_VERSION:
        # Alternate markers from another hash version are unusable.
        return specs
    key = f"{prefix}_{position}_alt"
    alt_count = row.get(f"{key}_token_count") or ""
    alt_hash = row.get(f"{key}_hash64") or ""
    alt_sha = row.get(f"{key}_sha256") or ""
    if alt_count and alt_hash and alt_sha:
        try:
            specs.append((int(alt_count), int(alt_hash), alt_sha))
        except ValueError:
            pass
    return specs
|
|
|
|
def _match_doc_by_hash(
    token_index: TokenIndex,
    hash_specs: list[tuple[int, int, str]],
) -> int:
    """Count how many hash specs locate a span in *token_index*."""
    return sum(
        1
        for window, hash64, sha in hash_specs
        if token_index.find_span_by_hash(window, hash64, sha)
    )
|
|
|
|
def _build_mid_candidates(
    token_index: TokenIndex,
    mid_specs: Optional[list[tuple[int, int, str]]],
) -> list[tuple[int, int]]:
    """Token positions (start, window_len) of every mid-anchor occurrence."""
    if not mid_specs:
        return []
    return [
        (position, spec[0])
        for spec in mid_specs
        for position in token_index.find_token_positions_by_hash(*spec)
    ]
|
|
|
|
def _span_has_mid(
    mid_candidates: list[tuple[int, int]],
    start_idx: int,
    end_idx: int,
) -> bool:
    """True if any mid-anchor candidate lies fully inside [start_idx, end_idx]."""
    return any(
        mid_start >= start_idx and mid_start + mid_len - 1 <= end_idx
        for mid_start, mid_len in mid_candidates
    )
|
|
|
|
def _find_span_by_anchors(
    token_index: TokenIndex,
    head_spec: Optional[tuple[int, int, str]],
    tail_spec: Optional[tuple[int, int, str]],
    expected_len: int,
    mid_specs: Optional[list[tuple[int, int, str]]] = None,
    *,
    require_mid: bool = False,
) -> Optional[tuple[int, int]]:
    """Find a character span bounded by head/tail anchor hashes.

    Pairs every head occurrence with every tail occurrence and picks the
    pair whose token length is closest to *expected_len*.  The first pass
    enforces both a [expected_len/2, expected_len*3] bound and a +/-tol
    window (tol = max(5, 30% of expected_len)); if nothing qualifies, a
    relaxed second pass keeps only the min/max bound.  When *require_mid*
    is set, at least one mid anchor must fall inside the candidate span.

    Returns (start_char, end_char) offsets, or None when no pair fits.
    """
    if head_spec is None or tail_spec is None:
        return None
    head_positions = token_index.find_token_positions_by_hash(*head_spec)
    tail_positions = token_index.find_token_positions_by_hash(*tail_spec)
    if not head_positions or not tail_positions:
        return None
    mid_candidates = []
    if require_mid:
        mid_candidates = _build_mid_candidates(token_index, mid_specs)
        if not mid_candidates:
            # Mid anchors required but none resolved: give up.
            return None
    best = None
    best_diff = None
    # Tolerances for the strict first pass.
    tol = max(5, int(expected_len * 0.3)) if expected_len else 10
    min_len = max(1, expected_len // 2) if expected_len else 1
    max_len = expected_len * 3 if expected_len else None
    for head_start in head_positions:
        head_end = head_start + head_spec[0] - 1
        for tail_start in tail_positions:
            tail_end = tail_start + tail_spec[0] - 1
            if tail_end < head_end:
                # Tail anchor must not end before the head anchor does.
                continue
            length = tail_end - head_start + 1
            if mid_candidates and not _span_has_mid(
                mid_candidates,
                head_start,
                tail_end,
            ):
                continue
            if expected_len:
                if length < min_len or (max_len and length > max_len):
                    continue
                if length < expected_len - tol or length > expected_len + tol:
                    continue
                diff = abs(length - expected_len)
            else:
                # No expected length given: prefer the shortest span.
                diff = length
            if best_diff is None or diff < best_diff:
                best_diff = diff
                best = (head_start, tail_end)
    if best is None and expected_len:
        # Relaxed second pass: drop the +/-tol window, keep min/max bounds.
        for head_start in head_positions:
            head_end = head_start + head_spec[0] - 1
            for tail_start in tail_positions:
                tail_end = tail_start + tail_spec[0] - 1
                if tail_end < head_end:
                    continue
                length = tail_end - head_start + 1
                if mid_candidates and not _span_has_mid(
                    mid_candidates,
                    head_start,
                    tail_end,
                ):
                    continue
                if length < min_len or (max_len and length > max_len):
                    continue
                diff = abs(length - expected_len)
                if best_diff is None or diff < best_diff:
                    best_diff = diff
                    best = (head_start, tail_end)
    if best is None:
        return None
    # Convert the winning token indices to character offsets.
    start_char = token_index.spans[best[0]][0]
    end_char = token_index.spans[best[1]][1]
    return start_char, end_char
|
|
|
|
def _find_span_from_anchor(
    token_index: TokenIndex,
    anchor_spec: Optional[tuple[int, int, str]],
    expected_len: int,
    position: str,
    mid_specs: Optional[list[tuple[int, int, str]]] = None,
    *,
    require_mid: bool = False,
    require_unique: bool = False,
) -> Optional[tuple[int, int]]:
    """Project a span of *expected_len* tokens from a single head/tail anchor.

    For "head" anchors the span starts at the anchor; otherwise it ends
    where the anchor ends (and occurrences are tried last-first).
    *require_unique* rejects anchors matching more than one position;
    *require_mid* additionally demands a mid anchor inside the span.

    Returns (start_char, end_char) offsets for the first in-bounds
    occurrence, or None.
    """
    if anchor_spec is None or expected_len <= 0:
        return None
    positions = token_index.find_token_positions_by_hash(*anchor_spec)
    if not positions:
        return None
    if require_unique and len(positions) != 1:
        return None
    mid_candidates = []
    if require_mid:
        mid_candidates = _build_mid_candidates(token_index, mid_specs)
        if not mid_candidates:
            return None
    if position == "tail":
        # Prefer the last occurrence when anchoring from the tail.
        positions = list(reversed(positions))
    for anchor_start in positions:
        if position == "head":
            start_idx = anchor_start
            end_idx = anchor_start + expected_len - 1
        else:
            anchor_end = anchor_start + anchor_spec[0] - 1
            end_idx = anchor_end
            start_idx = end_idx - expected_len + 1
        if start_idx < 0 or end_idx >= len(token_index.tokens):
            # Projected span falls outside the document.
            continue
        if mid_candidates and not _span_has_mid(
            mid_candidates,
            start_idx,
            end_idx,
        ):
            continue
        start_char = token_index.spans[start_idx][0]
        end_char = token_index.spans[end_idx][1]
        return start_char, end_char
    return None
|
|
|
|
def _pick_best_doc(
    token_indexes: Dict[Path, TokenIndex],
    hash_specs: list[tuple[int, int, str]],
) -> Optional[Path]:
    """Pick the TEI doc matching the most hash specs; None on tie or no match."""
    best_path: Optional[Path] = None
    best_score = 0
    ambiguous = False
    for candidate, token_index in token_indexes.items():
        score = _match_doc_by_hash(token_index, hash_specs)
        if score > best_score:
            best_path, best_score = candidate, score
            ambiguous = False
        elif score == best_score and score > 0:
            ambiguous = True
    if ambiguous or best_score == 0:
        return None
    return best_path
|
|
|
|
def _pick_best_pdf(
    pdf_paths: list[Path],
    pdf_token_cache: Dict[Path, TokenIndex],
    hash_specs: list[tuple[int, int, str]],
) -> Optional[Path]:
    """Pick the PDF matching the most hash specs; None on tie or no match.

    Extracted token indexes are cached in *pdf_token_cache* for reuse.
    """
    best_path: Optional[Path] = None
    best_score = 0
    ambiguous = False
    for candidate in pdf_paths:
        token_index = pdf_token_cache.get(candidate)
        if token_index is None:
            # Extract and tokenize lazily, once per PDF.
            pdf_text = extract_text_from_pdf(candidate)
            token_index = TokenIndex.from_text(pdf_text)
            pdf_token_cache[candidate] = token_index
        score = _match_doc_by_hash(token_index, hash_specs)
        if score > best_score:
            best_path, best_score = candidate, score
            ambiguous = False
        elif score == best_score and score > 0:
            ambiguous = True
    if ambiguous or best_score == 0:
        return None
    return best_path
|
|
|
|
def _run_grobid(input_dir: Path, output_dir: Path, config: Path) -> None:
    """Invoke scripts/pdf_to_grobid.py to convert PDFs into TEI XML.

    Raises subprocess.CalledProcessError when the converter exits non-zero.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    cmd = [
        sys.executable,
        "scripts/pdf_to_grobid.py",
        "--input_folder",
        str(input_dir),
        "--output_folder",
        str(output_dir),
    ]
    if config and config.exists():
        cmd.append("--config")
        cmd.append(str(config))
    subprocess.run(cmd, check=True)
|
|
|
|
| def main() -> None: |
| parser = argparse.ArgumentParser( |
| description="Hydrate DefExtra legal CSV using user-provided PDFs.", |
| ) |
| parser.add_argument( |
| "--legal-csv", |
| type=Path, |
| default=Path("results/paper_results/defextra_legal.csv"), |
| help="Legal DefExtra CSV with markers.", |
| ) |
| parser.add_argument( |
| "--pdf-dir", |
| type=Path, |
| required=True, |
| help="Directory with user-provided PDFs.", |
| ) |
| parser.add_argument( |
| "--grobid-out", |
| type=Path, |
| default=Path("outputs/defextra_grobid"), |
| help="Output directory for GROBID TEI files.", |
| ) |
| parser.add_argument( |
| "--grobid-config", |
| type=Path, |
| default=Path("config.json"), |
| help="Optional GROBID client config path.", |
| ) |
| parser.add_argument( |
| "--skip-grobid", |
| action="store_true", |
| help="Skip running GROBID (expects TEI files already present).", |
| ) |
| parser.add_argument( |
| "--output-csv", |
| type=Path, |
| default=Path("results/paper_results/defextra_hydrated.csv"), |
| help="Output hydrated CSV with excerpts.", |
| ) |
| parser.add_argument( |
| "--report", |
| type=Path, |
| default=None, |
| help="Optional report path for missing matches.", |
| ) |
| parser.add_argument( |
| "--require-complete", |
| action="store_true", |
| help="Exit with error if any definition/context is missing.", |
| ) |
| parser.add_argument( |
| "--filter-to-pdfs", |
| action="store_true", |
| help="Only process rows that can be mapped to a provided PDF.", |
| ) |
| parser.add_argument( |
| "--allow-pdf-hash-mismatch", |
| action="store_true", |
| help=( |
| "Continue when a PDF filename matches but hash markers do not. " |
| "By default, such PDFs are skipped and reported." |
| ), |
| ) |
| args = parser.parse_args() |
|
|
| if not args.legal_csv.exists(): |
| raise SystemExit(f"Legal CSV not found: {args.legal_csv}") |
| if not args.pdf_dir.exists(): |
| raise SystemExit(f"PDF dir not found: {args.pdf_dir}") |
|
|
| if not args.skip_grobid: |
| try: |
| _run_grobid(args.pdf_dir, args.grobid_out, args.grobid_config) |
| except subprocess.CalledProcessError: |
| raise SystemExit( |
| "GROBID processing failed. Ensure the GROBID server is running " |
| "and reachable (default: http://localhost:8070), or supply " |
| "--grobid-config with the correct server URL.", |
| ) |
|
|
| tei_index = build_tei_index([args.grobid_out]) |
| doi_index, arxiv_index = _build_meta_index(tei_index) |
| pdf_index = _build_pdf_index(args.pdf_dir) |
| doc_cache: Dict[str, Optional[DocIndex]] = {} |
| token_cache: Dict[str, Optional[TokenIndex]] = {} |
| tei_path_cache: Dict[str, Optional[Path]] = {} |
| pdf_token_cache: Dict[Path, TokenIndex] = {} |
| pdf_token_cache_stripped: Dict[tuple[Path, bool, bool], TokenIndex] = {} |
| tei_token_cache_stripped: Dict[tuple[Path, bool, bool], TokenIndex] = {} |
| pdf_failed: set[Path] = set() |
|
|
| def _get_stripped_index( |
| cache: Dict[tuple[Path, bool, bool], TokenIndex], |
| source_path: Optional[Path], |
| source_text: str, |
| strip_brackets: bool, |
| strip_parens: bool, |
| ) -> Optional[TokenIndex]: |
| if source_path is None: |
| return None |
| if not strip_brackets and not strip_parens: |
| return None |
| key = (source_path, strip_brackets, strip_parens) |
| if key not in cache: |
| stripped = strip_citations( |
| source_text, |
| strip_brackets=strip_brackets, |
| strip_parens=strip_parens, |
| ) |
| cache[key] = TokenIndex.from_text(stripped) |
| return cache[key] |
|
|
| with args.legal_csv.open("r", encoding="utf-8", newline="") as handle: |
| reader = csv.DictReader(handle) |
| legal_rows = list(reader) |
|
|
| paper_hashes: Dict[str, list[tuple[int, int, str]]] = {} |
| title_to_ids: Dict[str, list[str]] = {} |
| id_to_row: Dict[str, dict] = {} |
| for row in legal_rows: |
| paper_id = (row.get("paper_id") or "").strip() |
| if not paper_id: |
| continue |
| if paper_id not in id_to_row: |
| id_to_row[paper_id] = row |
| title_key = _normalize_title(row.get("paper_title") or "") |
| if title_key: |
| title_to_ids.setdefault(title_key, []).append(paper_id) |
| specs: list[tuple[int, int, str]] = [] |
| for prefix in ("definition", "context"): |
| spec = _select_hash_specs(row, prefix) |
| if spec is None: |
| continue |
| token_count, _, _ = spec |
| if token_count >= 5: |
| specs.append(spec) |
| if specs: |
| paper_hashes.setdefault(paper_id, []).extend(specs) |
| for prefix in ("definition", "context"): |
| for position in ("head", "mid", "tail"): |
| for spec in _select_anchor_spec_list(row, prefix, position): |
| if spec and spec[0] >= 5: |
| paper_hashes.setdefault(paper_id, []).append(spec) |
|
|
| tei_token_indexes: Dict[Path, TokenIndex] = {} |
| allowed_stems = set(pdf_index.keys()) if pdf_index else set() |
| for tei_path in tei_index.values(): |
| if allowed_stems: |
| stem = _tei_stem(tei_path) |
| stem_norm = normalize_paper_id(stem) |
| stem_stripped = ( |
| stem[len("paper_") :] if stem.startswith("paper_") else stem |
| ) |
| if ( |
| stem not in allowed_stems |
| and stem_norm not in allowed_stems |
| and stem_stripped not in allowed_stems |
| ): |
| continue |
| try: |
| doc_index = DocIndex.from_tei(tei_path) |
| except Exception: |
| continue |
| tei_token_indexes[tei_path] = TokenIndex.from_text(doc_index.doc_text) |
|
|
| output_rows = [] |
| missing_papers = set() |
| missing_defs = 0 |
| missing_ctxs = 0 |
| hydrated_from_pdf = 0 |
| hydrated_from_anchor = 0 |
| pdf_hash_mismatches: list[dict[str, str]] = [] |
| pdf_hash_mismatch_seen: set[tuple[str, str]] = set() |
| missing_def_rows: list[dict] = [] |
| missing_ctx_rows: list[dict] = [] |
|
|
| for row in legal_rows: |
| paper_id = (row.get("paper_id") or "").strip() |
| doi = (row.get("paper_doi") or "").strip() |
| arxiv = (row.get("paper_arxiv") or "").strip() |
|
|
| if paper_id not in doc_cache: |
| tei_path = _resolve_tei_path( |
| paper_id, |
| doi, |
| arxiv, |
| tei_index, |
| doi_index, |
| arxiv_index, |
| ) |
| if tei_path is None: |
| hash_specs = paper_hashes.get(paper_id, []) |
| if hash_specs: |
| tei_path = _pick_best_doc( |
| tei_token_indexes, |
| hash_specs, |
| ) |
| if tei_path is None: |
| doc_cache[paper_id] = None |
| token_cache[paper_id] = None |
| tei_path_cache[paper_id] = None |
| else: |
| doc_index = DocIndex.from_tei(tei_path) |
| doc_cache[paper_id] = doc_index |
| token_cache[paper_id] = TokenIndex.from_text( |
| doc_index.doc_text, |
| ) |
| tei_path_cache[paper_id] = tei_path |
|
|
| doc_index = doc_cache.get(paper_id) |
| tei_token_index = token_cache.get(paper_id) |
| definition = "" |
| context = "" |
| pdf_token_index: Optional[TokenIndex] = None |
| pdf_path = None |
| tei_path = tei_path_cache.get(paper_id) |
| pdf_direct_match = False |
| if tei_path is not None: |
| stem = _tei_stem(tei_path) |
| pdf_path = pdf_index.get(stem) or pdf_index.get( |
| normalize_paper_id(stem), |
| ) |
| if pdf_path is not None: |
| pdf_direct_match = True |
| if pdf_path is None: |
| for candidate in _candidate_ids(paper_id, doi, arxiv): |
| pdf_path = pdf_index.get(candidate) |
| if pdf_path: |
| pdf_direct_match = True |
| break |
| if pdf_path is None: |
| title_key = _normalize_title(row.get("paper_title") or "") |
| for other_id in title_to_ids.get(title_key, []): |
| if other_id == paper_id: |
| continue |
| other_row = id_to_row.get(other_id, {}) |
| other_doi = (other_row.get("paper_doi") or "").strip() |
| other_arxiv = (other_row.get("paper_arxiv") or "").strip() |
| for candidate in _candidate_ids( |
| other_id, |
| other_doi, |
| other_arxiv, |
| ): |
| pdf_path = pdf_index.get(candidate) |
| if pdf_path: |
| pdf_direct_match = True |
| break |
| if pdf_path: |
| break |
| hash_specs = paper_hashes.get(paper_id, []) |
| if pdf_path is not None and hash_specs: |
| if pdf_path not in pdf_token_cache: |
| pdf_text = extract_text_from_pdf(pdf_path) |
| pdf_token_cache[pdf_path] = TokenIndex.from_text(pdf_text) |
| pdf_token_index = pdf_token_cache[pdf_path] |
| if _match_doc_by_hash(pdf_token_index, hash_specs) == 0: |
| mismatch_key = (paper_id, str(pdf_path)) |
| if mismatch_key not in pdf_hash_mismatch_seen: |
| pdf_hash_mismatch_seen.add(mismatch_key) |
| pdf_hash_mismatches.append( |
| {"paper_id": paper_id, "pdf": str(pdf_path)}, |
| ) |
| if not args.allow_pdf_hash_mismatch and pdf_direct_match: |
| print( |
| f"Warning: PDF hash markers did not match for {pdf_path.name}; " |
| "skipping PDF (use --allow-pdf-hash-mismatch to override).", |
| file=sys.stderr, |
| ) |
| elif args.allow_pdf_hash_mismatch: |
| print( |
| f"Warning: PDF hash markers did not match for {pdf_path.name}; " |
| "continuing with direct filename match.", |
| file=sys.stderr, |
| ) |
| if not args.allow_pdf_hash_mismatch: |
| pdf_path = None |
| pdf_token_index = None |
| pdf_direct_match = False |
| if pdf_path is None and hash_specs: |
| pdf_paths = list({p for p in pdf_index.values()}) |
| if pdf_paths: |
| pdf_path = _pick_best_pdf( |
| pdf_paths, |
| pdf_token_cache, |
| hash_specs, |
| ) |
|
|
| if args.filter_to_pdfs and pdf_path is None: |
| continue |
|
|
| if pdf_path is not None and pdf_path not in pdf_token_cache: |
| try: |
| pdf_text = extract_text_from_pdf(pdf_path) |
| pdf_token_cache[pdf_path] = TokenIndex.from_text(pdf_text) |
| except Exception as exc: |
| pdf_token_cache[pdf_path] = TokenIndex.from_text("") |
| if pdf_path not in pdf_failed: |
| pdf_failed.add(pdf_path) |
| print( |
| f"Warning: PDF text extraction failed for {pdf_path.name}: {exc}", |
| file=sys.stderr, |
| ) |
| pdf_token_index = pdf_token_cache.get(pdf_path) if pdf_path else None |
|
|
| if doc_index is None: |
| missing_papers.add(paper_id) |
| else: |
| def_start = row.get("definition_char_start") or "" |
| def_end = row.get("definition_char_end") or "" |
| ctx_start = row.get("context_char_start") or "" |
| ctx_end = row.get("context_char_end") or "" |
| def_strip_brackets, def_strip_parens = _strip_flags( |
| row, |
| "definition", |
| ) |
| ctx_strip_brackets, ctx_strip_parens = _strip_flags(row, "context") |
|
|
| if not definition and tei_token_index: |
| spec = _select_hash_specs(row, "definition") |
| if spec: |
| span = tei_token_index.find_span_by_hash(*spec) |
| if span: |
| definition = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| if not definition: |
| stripped_index = _get_stripped_index( |
| tei_token_cache_stripped, |
| tei_path, |
| doc_index.doc_text, |
| def_strip_brackets, |
| def_strip_parens, |
| ) |
| if stripped_index is not None: |
| span = stripped_index.find_span_by_hash(*spec) |
| if span: |
| definition = _extract_with_trailing_punct( |
| stripped_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| if not definition and not (def_start and def_end): |
| head_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "tail", |
| ) |
| expected_len = int(row.get("definition_token_count") or 0) |
| for head_spec in head_specs or [None]: |
| for tail_spec in tail_specs or [None]: |
| if head_spec is None or tail_spec is None: |
| continue |
| span = _find_span_by_anchors( |
| tei_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_by_anchors( |
| tei_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| ) |
| if span: |
| definition = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| break |
| if definition: |
| break |
| if not definition: |
| head_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "tail", |
| ) |
| expected_len = int(row.get("definition_token_count") or 0) |
| span = None |
| for head_spec in head_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_unique=True, |
| ) |
| else: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span is None: |
| for tail_spec in tail_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_unique=True, |
| ) |
| elif span is None: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span: |
| definition = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| if not definition and def_start and def_end: |
| candidate = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| int(def_start), |
| int(def_end), |
| ) |
| if _span_matches_hash(row, candidate, "definition"): |
| definition = candidate |
| if not definition and pdf_token_index: |
| span = _find_pdf_hash_span(row, pdf_token_index, "definition") |
| if span: |
| definition = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| if not definition: |
| stripped_index = _get_stripped_index( |
| pdf_token_cache_stripped, |
| pdf_path, |
| pdf_token_index.doc_text, |
| def_strip_brackets, |
| def_strip_parens, |
| ) |
| if stripped_index is not None: |
| span = _find_pdf_hash_span( |
| row, |
| stripped_index, |
| "definition", |
| ) |
| if span: |
| definition = _extract_with_trailing_punct( |
| stripped_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
|
|
| if not context and tei_token_index: |
| spec = _select_hash_specs(row, "context") |
| if spec: |
| span = tei_token_index.find_span_by_hash(*spec) |
| if span: |
| context = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| if not context: |
| stripped_index = _get_stripped_index( |
| tei_token_cache_stripped, |
| tei_path, |
| doc_index.doc_text, |
| ctx_strip_brackets, |
| ctx_strip_parens, |
| ) |
| if stripped_index is not None: |
| span = stripped_index.find_span_by_hash(*spec) |
| if span: |
| context = _extract_with_trailing_punct( |
| stripped_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| if not context and not (ctx_start and ctx_end): |
| head_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "tail", |
| ) |
| expected_len = int(row.get("context_token_count") or 0) |
| for head_spec in head_specs or [None]: |
| for tail_spec in tail_specs or [None]: |
| if head_spec is None or tail_spec is None: |
| continue |
| span = _find_span_by_anchors( |
| tei_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_by_anchors( |
| tei_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| ) |
| if span: |
| context = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| break |
| if context: |
| break |
| if not context: |
| head_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "tail", |
| ) |
| expected_len = int(row.get("context_token_count") or 0) |
| span = None |
| for head_spec in head_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_unique=True, |
| ) |
| else: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span is None: |
| for tail_spec in tail_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_unique=True, |
| ) |
| elif span is None: |
| span = _find_span_from_anchor( |
| tei_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span: |
| context = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| if not context and ctx_start and ctx_end: |
| candidate = _extract_with_trailing_punct( |
| doc_index.doc_text, |
| int(ctx_start), |
| int(ctx_end), |
| ) |
| if _span_matches_hash(row, candidate, "context"): |
| context = candidate |
| if not context and pdf_token_index: |
| span = _find_pdf_hash_span(row, pdf_token_index, "context") |
| if span: |
| context = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| if not context: |
| stripped_index = _get_stripped_index( |
| pdf_token_cache_stripped, |
| pdf_path, |
| pdf_token_index.doc_text, |
| ctx_strip_brackets, |
| ctx_strip_parens, |
| ) |
| if stripped_index is not None: |
| span = _find_pdf_hash_span( |
| row, |
| stripped_index, |
| "context", |
| ) |
| if span: |
| context = _extract_with_trailing_punct( |
| stripped_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
|
|
| if not definition and pdf_path is not None and pdf_token_index: |
| spec = _select_hash_specs(row, "definition") |
| if spec: |
| span = pdf_token_index.find_span_by_hash(*spec) |
| if span: |
| definition = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| if not definition: |
| head_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "tail", |
| ) |
| expected_len = int(row.get("definition_token_count") or 0) |
| for head_spec in head_specs or [None]: |
| for tail_spec in tail_specs or [None]: |
| if head_spec is None or tail_spec is None: |
| continue |
| span = _find_span_by_anchors( |
| pdf_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_by_anchors( |
| pdf_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| ) |
| if span: |
| definition = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| hydrated_from_anchor += 1 |
| break |
| if definition: |
| break |
| if not definition: |
| head_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "definition", |
| "tail", |
| ) |
| expected_len = int(row.get("definition_token_count") or 0) |
| span = None |
| for head_spec in head_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_unique=True, |
| ) |
| else: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span is None: |
| for tail_spec in tail_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_unique=True, |
| ) |
| else: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span: |
| definition = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| hydrated_from_anchor += 1 |
|
|
| if not definition: |
| missing_defs += 1 |
| missing_def_rows.append( |
| { |
| "paper_id": paper_id, |
| "concept": row.get("concept", ""), |
| "reason": "missing_definition", |
| }, |
| ) |
|
|
| if not context and pdf_path is not None and pdf_token_index: |
| spec = _select_hash_specs(row, "context") |
| if spec: |
| span = pdf_token_index.find_span_by_hash(*spec) |
| if span: |
| context = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| if not context: |
| head_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "tail", |
| ) |
| expected_len = int(row.get("context_token_count") or 0) |
| for head_spec in head_specs or [None]: |
| for tail_spec in tail_specs or [None]: |
| if head_spec is None or tail_spec is None: |
| continue |
| span = _find_span_by_anchors( |
| pdf_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_by_anchors( |
| pdf_token_index, |
| head_spec, |
| tail_spec, |
| expected_len, |
| ) |
| if span: |
| context = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| hydrated_from_anchor += 1 |
| break |
| if context: |
| break |
| if not context: |
| head_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "head", |
| ) |
| mid_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "mid", |
| ) |
| tail_specs = _select_anchor_spec_list( |
| row, |
| "context", |
| "tail", |
| ) |
| expected_len = int(row.get("context_token_count") or 0) |
| span = None |
| for head_spec in head_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| require_unique=True, |
| ) |
| else: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| head_spec, |
| expected_len, |
| "head", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span is None: |
| for tail_spec in tail_specs: |
| if mid_specs: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_mid=True, |
| ) |
| if span is None: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| require_unique=True, |
| ) |
| else: |
| span = _find_span_from_anchor( |
| pdf_token_index, |
| tail_spec, |
| expected_len, |
| "tail", |
| mid_specs, |
| ) |
| if span: |
| break |
| if span: |
| context = _extract_with_trailing_punct( |
| pdf_token_index.doc_text, |
| span[0], |
| span[1], |
| ) |
| hydrated_from_pdf += 1 |
| hydrated_from_anchor += 1 |
|
|
| if not context: |
| missing_ctxs += 1 |
| missing_ctx_rows.append( |
| { |
| "paper_id": paper_id, |
| "concept": row.get("concept", ""), |
| "reason": "missing_context", |
| }, |
| ) |
|
|
| def_preserve_lines = _row_flag( |
| row, |
| "definition_preserve_linebreaks", |
| ) |
| ctx_preserve_lines = _row_flag( |
| row, |
| "context_preserve_linebreaks", |
| ) |
| def_preserve_hyph = _row_flag( |
| row, |
| "definition_preserve_hyphenation", |
| ) |
| ctx_preserve_hyph = _row_flag( |
| row, |
| "context_preserve_hyphenation", |
| ) |
| def_keep_bracket = _row_flag( |
| row, |
| "definition_has_bracket_citation", |
| True, |
| ) |
| def_keep_paren = _row_flag( |
| row, |
| "definition_has_paren_citation", |
| True, |
| ) |
| def_split_letter_digit = not _row_flag( |
| row, |
| "definition_has_letter_digit", |
| ) |
| ctx_keep_bracket = _row_flag( |
| row, |
| "context_has_bracket_citation", |
| True, |
| ) |
| ctx_keep_paren = _row_flag( |
| row, |
| "context_has_paren_citation", |
| True, |
| ) |
| ctx_split_letter_digit = not _row_flag( |
| row, |
| "context_has_letter_digit", |
| ) |
|
|
| output_rows.append( |
| { |
| "paper_id": paper_id, |
| "paper_title": row.get("paper_title", ""), |
| "paper_doi": doi, |
| "paper_arxiv": arxiv, |
| "concept": row.get("concept", ""), |
| "definition": _ensure_trailing_punct( |
| _postprocess_text( |
| definition, |
| int(row.get("definition_token_count") or 0), |
| def_preserve_lines, |
| def_preserve_hyph, |
| def_keep_bracket, |
| def_keep_paren, |
| def_split_letter_digit, |
| ), |
| row.get("definition_end_punct", ""), |
| ), |
| "context": _ensure_trailing_punct( |
| _postprocess_text( |
| context, |
| int(row.get("context_token_count") or 0), |
| ctx_preserve_lines, |
| ctx_preserve_hyph, |
| ctx_keep_bracket, |
| ctx_keep_paren, |
| ctx_split_letter_digit, |
| ), |
| row.get("context_end_punct", ""), |
| ), |
| "definition_type": row.get("definition_type", ""), |
| "source_file": row.get("source_file", ""), |
| "is_out_of_domain": row.get("is_out_of_domain", ""), |
| }, |
| ) |
|
|
| args.output_csv.parent.mkdir(parents=True, exist_ok=True) |
| with args.output_csv.open("w", encoding="utf-8", newline="") as handle: |
| fieldnames = [ |
| "paper_id", |
| "paper_title", |
| "paper_doi", |
| "paper_arxiv", |
| "concept", |
| "definition", |
| "context", |
| "definition_type", |
| "source_file", |
| "is_out_of_domain", |
| ] |
| writer = csv.DictWriter(handle, fieldnames=fieldnames) |
| writer.writeheader() |
| for row in output_rows: |
| writer.writerow(row) |
|
|
| print(f"Wrote hydrated CSV to {args.output_csv}") |
| print(f"Missing TEI for {len(missing_papers)} papers") |
| print(f"Missing definition spans: {missing_defs}") |
| print(f"Missing context spans: {missing_ctxs}") |
| print( |
| "Hydrated from PDF fallback: " |
| f"{hydrated_from_pdf} (anchors used: {hydrated_from_anchor})", |
| ) |
| if args.report is not None: |
| report_lines = [] |
| if missing_papers: |
| report_lines.append("Missing papers:") |
| for paper_id in sorted(missing_papers): |
| report_lines.append(f"- {paper_id}") |
| report_lines.append("") |
| if pdf_hash_mismatches: |
| report_lines.append( |
| "PDF hash mismatches (filename matched, hash did not):", |
| ) |
| for item in pdf_hash_mismatches: |
| report_lines.append( |
| f"- {item['paper_id']} | {item['pdf']}", |
| ) |
| report_lines.append( |
| "Note: rerun with --allow-pdf-hash-mismatch to continue with these PDFs.", |
| ) |
| report_lines.append("") |
| report_lines.append(f"Missing definition spans: {missing_defs}") |
| report_lines.append(f"Missing context spans: {missing_ctxs}") |
| if missing_def_rows: |
| report_lines.append("") |
| report_lines.append("Missing definitions (paper_id | concept):") |
| for item in missing_def_rows: |
| report_lines.append( |
| f"- {item['paper_id']} | {item['concept']}", |
| ) |
| if missing_ctx_rows: |
| report_lines.append("") |
| report_lines.append("Missing contexts (paper_id | concept):") |
| for item in missing_ctx_rows: |
| report_lines.append( |
| f"- {item['paper_id']} | {item['concept']}", |
| ) |
| args.report.parent.mkdir(parents=True, exist_ok=True) |
| args.report.write_text( |
| "\n".join(report_lines) + "\n", |
| encoding="utf-8", |
| ) |
| print(f"Wrote report to {args.report}") |
| if args.require_complete and (missing_defs or missing_ctxs): |
| raise SystemExit( |
| "Hydration incomplete: " |
| f"{missing_defs} definitions, {missing_ctxs} contexts missing.", |
| ) |
|
|
|
|
# Entry point: run the hydration CLI only when this file is executed as a
# script; importing the module (e.g. for its helpers) must not trigger a run.
if __name__ == "__main__":
    main()
|
|