| """ |
| Knowledge Drift Dataset - Master Collection Script |
| ==================================================== |
| Orchestrates all data collection and produces the final unified dataset. |
| |
| Usage: |
| python collect_all.py --all # Run everything |
| python collect_all.py --static # Only static/manual facts |
| python collect_all.py --wikidata # Only Wikidata queries |
| python collect_all.py --merge # Merge existing raw files |
| python collect_all.py --stats # Print dataset statistics |
| """ |

import argparse
import json
import logging
import os
import sys
from collections import Counter
from datetime import datetime
from typing import Dict, List

# Make sibling packages importable regardless of the working directory the
# script is launched from.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, SCRIPT_DIR)


logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

DATA_DIR = os.path.join(SCRIPT_DIR, "data")


def ensure_data_dir():
    """Create the data/ output directory if it does not already exist."""
    os.makedirs(DATA_DIR, exist_ok=True)


def collect_static():
    """Run static facts collection."""
    from collectors.static_collector import collect_all_static

    logger.info("=" * 60)
    logger.info("COLLECTING STATIC/MANUAL FACTS")
    logger.info("=" * 60)

    results = collect_all_static()

    output_path = os.path.join(DATA_DIR, "static_facts.json")
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)

    logger.info(f"Static facts saved to {output_path}")
    return results


def collect_wikidata():
    """Run Wikidata SPARQL collection."""
    from collectors.wikidata_collector import collect_all_wikidata

    logger.info("=" * 60)
    logger.info("COLLECTING FROM WIKIDATA")
    logger.info("=" * 60)

    results = collect_all_wikidata()

    output_path = os.path.join(DATA_DIR, "wikidata_raw.json")
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)

    logger.info(f"Wikidata facts saved to {output_path}")
    return results


def flatten_to_query_level(all_data: Dict) -> List[Dict]:
    """
    Flatten the hierarchical dataset into individual query-level samples.
    Each sample is a single query at a single timestamp, ready for model
    inference.

    This is the format you'll feed to the model + probe pipeline:
    - Run model on query
    - Extract hidden states
    - Record model's answer
    - Compare with expected answer
    - Train/evaluate drift detection probe
    """
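    # Shape of one flattened record (values below are illustrative only):
    #   {"sample_id": "q_000001", "query": "In 2025, ___ was ...",
    #    "expected_answer": "...", "year": 2025, "temporal_zone": "post_cutoff",
    #    "is_drifted_query": True, "category": "unknown_drift", ...}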
    # Deferred import, matching the collector imports above; configs.config
    # only needs to resolve when flattening actually runs.
    from configs.config import TIMESTAMP_YEARS

    flat_samples = []
    sample_id = 0

    for source_key, items in all_data.items():
        for item in items:

            # Case 1: the item carries pre-built temporal variants.
            if "temporal_variants" in item:
                for variant in item["temporal_variants"]:
                    flat_sample = {
                        "sample_id": f"q_{sample_id:06d}",
                        "query": variant["query"],
                        "expected_answer": variant.get("expected_answer", ""),
                        "year": variant.get("year", 0),
                        "temporal_zone": variant.get("temporal_zone", "unknown"),
                        "is_drifted_query": variant.get("is_drifted_query", False),
                        "model_likely_answer": variant.get("model_likely_answer", ""),
                        "language": variant.get("language", "en"),
                        # Metadata inherited from the parent item.
                        "entity": item.get("entity", ""),
                        "relation": item.get("relation", ""),
                        "knowledge_type": item.get("knowledge_type", ""),
                        "category": item.get("category", ""),
                        "source": item.get("source", ""),
                        "parent_id": item.get("id", ""),
                        "drift_date": item.get("drift_date", ""),
                    }
                    flat_samples.append(flat_sample)
                    sample_id += 1

            # Case 2: bilingual variants stored under language-specific keys.
            for lang_key in ["temporal_variants_ar", "temporal_variants_en"]:
                if lang_key in item:
                    for variant in item[lang_key]:
                        flat_sample = {
                            "sample_id": f"q_{sample_id:06d}",
                            "query": variant["query"],
                            "expected_answer": variant.get("expected_answer", ""),
                            "year": variant.get("year", 0),
                            "temporal_zone": variant.get("temporal_zone", "unknown"),
                            "is_drifted_query": variant.get("is_drifted_query", False),
                            "language": variant.get("language", "en"),
                            "entity": item.get("entity", item.get("entity_en", "")),
                            "relation": item.get("relation", item.get("relation_en", "")),
                            "knowledge_type": item.get("knowledge_type", ""),
                            "category": item.get("category", ""),
                            "source": item.get("source", ""),
                            "parent_id": item.get("id", ""),
                            "is_bilingual": True,
                            "drift_date": item.get("drift_date", ""),
                        }
                        flat_samples.append(flat_sample)
                        sample_id += 1

            # Case 3: raw fact with query templates; synthesize one query per
            # timestamp year. Only applies when no pre-built variants exist.
            has_variants = any(
                k in item
                for k in ("temporal_variants", "temporal_variants_ar", "temporal_variants_en")
            )
            if not has_variants:
                templates = item.get("templates", [])
                if not templates:
                    continue
                template = templates[0]

                for year in TIMESTAMP_YEARS:
                    # Zones are relative to the target model's 2024-08 cutoff.
                    temporal_zone = "pre_cutoff" if year < 2024 else (
                        "near_cutoff" if year == 2024 else "post_cutoff"
                    )

                    try:
                        query = template.format(
                            year=year,
                            subject=item.get("entity", ""),
                            object="___"
                        )
                    except (KeyError, IndexError):
                        query = f"In {year}, {item.get('entity', '')} {item.get('relation', '')} ___"

                    category = item.get("category", "")
                    if category == "unknown_drift":
                        # Changed after the cutoff: post-cutoff queries are
                        # drifted, and the model likely still holds the old answer.
                        if temporal_zone == "post_cutoff":
                            expected = item.get("new_answer", "")
                            model_likely = item.get("old_answer", "")
                            is_drifted = True
                        else:
                            expected = item.get("old_answer", "")
                            model_likely = ""
                            is_drifted = False
                    elif category == "known_drift":
                        # Changed before the cutoff: choose old vs new answer by
                        # comparing the query year against the drift year.
                        drift_date = item.get("drift_date", "")
                        try:
                            drift_year = int(drift_date[:4]) if drift_date else 2023
                        except ValueError:
                            drift_year = 2023
                        expected = item.get("old_answer", "") if year < drift_year else item.get("new_answer", "")
                        model_likely = ""
                        is_drifted = False
                    else:
                        # Stable / no-drift facts have a single timeless answer.
                        expected = item.get("answer", "")
                        model_likely = ""
                        is_drifted = False

                    flat_sample = {
                        "sample_id": f"q_{sample_id:06d}",
                        "query": query,
                        "expected_answer": expected,
                        "year": year,
                        "temporal_zone": temporal_zone,
                        "is_drifted_query": is_drifted,
                        "model_likely_answer": model_likely,
                        "language": "en",
                        "entity": item.get("entity", ""),
                        "relation": item.get("relation", ""),
                        "knowledge_type": item.get("knowledge_type", ""),
                        "category": category,
                        "source": item.get("source", ""),
                        "parent_id": item.get("entity_qid", ""),
                        "drift_date": item.get("drift_date", ""),
                    }
                    flat_samples.append(flat_sample)
                    sample_id += 1

    return flat_samples
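

# A minimal sketch, not called anywhere in this script, of the probe-pipeline
# step described in flatten_to_query_level's docstring. The last-token,
# single-layer pooling and the torch/transformers usage are assumptions for
# illustration, not choices this repo has committed to.
def example_extract_hidden_state(query: str, model, tokenizer, layer: int = -1):
    """Run the model on one query and return a single hidden-state vector."""
    import torch  # local import: the collection script itself does not need torch

    inputs = tokenizer(query, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    # outputs.hidden_states is a tuple of (num_layers + 1) tensors of shape
    # (batch, seq_len, hidden_dim); keep the final token of the chosen layer.
    return outputs.hidden_states[layer][0, -1, :]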


def merge_all():
    """Merge all collected data into a single unified dataset."""
    logger.info("=" * 60)
    logger.info("MERGING ALL DATA")
    logger.info("=" * 60)

    all_data = {}

    # Static/manual facts.
    static_path = os.path.join(DATA_DIR, "static_facts.json")
    if os.path.exists(static_path):
        with open(static_path, 'r', encoding='utf-8') as f:
            static_data = json.load(f)
        all_data.update(static_data)
        logger.info(f"Loaded static facts from {static_path}")

    # Wikidata facts.
    wiki_path = os.path.join(DATA_DIR, "wikidata_raw.json")
    if os.path.exists(wiki_path):
        with open(wiki_path, 'r', encoding='utf-8') as f:
            wiki_data = json.load(f)
        all_data.update(wiki_data)
        logger.info(f"Loaded wikidata facts from {wiki_path}")

    # Flatten to one sample per (query, year).
    flat_samples = flatten_to_query_level(all_data)

    dataset = {
        "metadata": {
            "name": "Knowledge Drift Detection Dataset",
            "version": "1.0",
            "created": datetime.now().isoformat(),
            "model_target": "Qwen 2.5 7B",
            "model_cutoff": "2024-08-01",
            "description": (
                "Dataset for detecting knowledge drift in LLMs using "
                "mechanistic interpretability. Contains temporally scoped "
                "factual queries across stable and changing knowledge."
            ),
            "categories": {
                "stable": "Category 1 - Facts that never change (control)",
                "known_drift": "Category 2 - Facts that changed pre-cutoff (model knows both)",
                "unknown_drift": "Category 3 - Facts that changed post-cutoff (model doesn't know)",
                "no_drift": "Category 4 - Changeable facts that didn't change (control)",
            },
            "total_samples": len(flat_samples),
        },
        "samples": flat_samples,
    }

    output_path = os.path.join(DATA_DIR, "knowledge_drift_dataset.json")
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(dataset, f, indent=2, ensure_ascii=False)

    logger.info(f"Saved unified dataset to {output_path}")
    logger.info(f"Total query-level samples: {len(flat_samples)}")

    # Also write a compact JSONL (one sample per line) for streaming pipelines.
    compact_path = os.path.join(DATA_DIR, "knowledge_drift_compact.jsonl")
    with open(compact_path, 'w', encoding='utf-8') as f:
        for sample in flat_samples:
            f.write(json.dumps(sample, ensure_ascii=False) + '\n')

    logger.info(f"Saved compact JSONL to {compact_path}")

    return dataset
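

# A minimal sketch of how downstream code might stream the compact JSONL that
# merge_all() writes; the helper name is illustrative and nothing in this
# script calls it.
def iter_compact_samples(path=None):
    """Yield one sample dict per line of knowledge_drift_compact.jsonl."""
    if path is None:
        path = os.path.join(DATA_DIR, "knowledge_drift_compact.jsonl")
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)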


def print_stats():
    """Print detailed statistics about the dataset."""
    dataset_path = os.path.join(DATA_DIR, "knowledge_drift_dataset.json")

    if not os.path.exists(dataset_path):
        logger.error(f"Dataset not found at {dataset_path}. Run --merge first.")
        return

    with open(dataset_path, 'r', encoding='utf-8') as f:
        dataset = json.load(f)

    samples = dataset["samples"]

    print("\n" + "=" * 70)
    print(" KNOWLEDGE DRIFT DATASET STATISTICS")
    print("=" * 70)

    print(f"\nTotal query-level samples: {len(samples)}")

    # Category breakdown with a simple text bar chart.
    cat_counts = Counter(s["category"] for s in samples)
    print("\nBy Category:")
    for cat, count in sorted(cat_counts.items()):
        pct = 100 * count / len(samples)
        bar = "#" * int(pct / 2)
        print(f"  {cat:20s}: {count:5d} ({pct:5.1f}%) {bar}")

    tz_counts = Counter(s["temporal_zone"] for s in samples)
    print("\nBy Temporal Zone:")
    for tz, count in sorted(tz_counts.items()):
        pct = 100 * count / len(samples)
        print(f"  {tz:20s}: {count:5d} ({pct:5.1f}%)")

    kt_counts = Counter(s["knowledge_type"] for s in samples)
    print("\nBy Knowledge Type:")
    for kt, count in sorted(kt_counts.items()):
        pct = 100 * count / len(samples)
        print(f"  {kt:20s}: {count:5d} ({pct:5.1f}%)")

    drifted = sum(1 for s in samples if s.get("is_drifted_query", False))
    print("\nDrifted Queries (post-cutoff, answer changed):")
    if len(samples) > 0:
        print(f"  Total: {drifted} / {len(samples)} ({100 * drifted / len(samples):.1f}%)")
    else:
        print("  Total: 0 (dataset is empty; check that --merge ran correctly)")

    lang_counts = Counter(s.get("language", "en") for s in samples)
    print("\nBy Language:")
    for lang, count in sorted(lang_counts.items()):
        print(f"  {lang:5s}: {count:5d}")

    src_counts = Counter(s.get("source", "unknown") for s in samples)
    print("\nBy Source:")
    for src, count in sorted(src_counts.items()):
        print(f"  {src:25s}: {count:5d}")

    year_counts = Counter(s.get("year", 0) for s in samples)
    print("\nBy Year:")
    for year, count in sorted(year_counts.items()):
        marker = " <- CUTOFF" if year == 2024 else (" <- POST-CUTOFF (drift zone)" if year == 2025 else "")
        print(f"  {year}: {count:5d}{marker}")

    # The two contrasts the probing experiments are built around.
    print("\nKey Comparisons for Probing:")

    post_cutoff_drifted = [s for s in samples if s["temporal_zone"] == "post_cutoff" and s.get("is_drifted_query")]
    post_cutoff_stable = [s for s in samples if s["temporal_zone"] == "post_cutoff" and not s.get("is_drifted_query")]
    pre_cutoff = [s for s in samples if s["temporal_zone"] == "pre_cutoff"]

    print("  Comparison 1 (drift vs no-drift, both post-cutoff):")
    print(f"    Post-cutoff DRIFTED: {len(post_cutoff_drifted)}")
    print(f"    Post-cutoff STABLE:  {len(post_cutoff_stable)}")

    print("  Comparison 2 (pre vs post cutoff):")
    print(f"    Pre-cutoff queries:  {len(pre_cutoff)}")
    print(f"    Post-cutoff queries: {len(post_cutoff_drifted) + len(post_cutoff_stable)}")

    # One example query per category.
    print("\nExample Queries:")

    for cat_name in ["stable", "known_drift", "unknown_drift", "no_drift"]:
        cat_samples = [s for s in samples if s["category"] == cat_name]
        if cat_samples:
            example = cat_samples[0]
            print(f"\n  [{cat_name}]")
            print(f"    Query:  {example['query']}")
            print(f"    Answer: {example['expected_answer']}")
            if example.get('model_likely_answer'):
                print(f"    Model likely says: {example['model_likely_answer']}")

    print("\n" + "=" * 70)


def main():
    parser = argparse.ArgumentParser(description="Knowledge Drift Dataset Collection")
    parser.add_argument("--all", action="store_true", help="Run everything")
    parser.add_argument("--static", action="store_true", help="Collect static facts only")
    parser.add_argument("--wikidata", action="store_true", help="Collect from Wikidata only")
    parser.add_argument("--merge", action="store_true", help="Merge existing raw files")
    parser.add_argument("--stats", action="store_true", help="Print dataset statistics")

    args = parser.parse_args()

    # With no flags, behave as if --all was passed.
    if not any([args.all, args.static, args.wikidata, args.merge, args.stats]):
        args.all = True

    ensure_data_dir()

    if args.all or args.static:
        collect_static()

    if args.all or args.wikidata:
        collect_wikidata()

    if args.all or args.merge:
        merge_all()

    if args.all or args.stats:
        print_stats()


if __name__ == "__main__":
    main()