""" Generate Paraphrases for Robustness Experiment ================================================ For each query in the Tier 1 dataset, generate 5 diverse paraphrases using Gemini. The paraphrases vary surface form while preserving: - The factual meaning - The year reference - The entity and relation - The expected answer Output: - data/tier1_paraphrased.json (original + 5 paraphrases per fact) - data/tier1__paraphrased.json (per-model versions) Usage: export GOOGLE_API_KEY="your_key" cd ~/svd_kg/knowledge_drift python generate_paraphrases.py # To resume from checkpoint: python generate_paraphrases.py --resume """ import json, os, re, time, sys, argparse from collections import Counter from concurrent.futures import ThreadPoolExecutor, as_completed from threading import Lock from datetime import datetime import google.generativeai as genai # ============================================================ # CONFIG # ============================================================ GEMINI_MODEL = "gemini-2.5-flash" GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "") if not GOOGLE_API_KEY: print("ERROR: Set GOOGLE_API_KEY environment variable") sys.exit(1) RATE_LIMIT_DELAY = 0.15 MAX_WORKERS = 100 BATCH_SIZE = 10 # fewer per batch since we're generating more text NUM_PARAPHRASES = 5 TIER1_PATH = "data/knowledge_drift_unified_tier1.json" OUTPUT_PATH = "data/tier1_paraphrased.json" CHECKPOINT_PATH = "data/_paraphrase_checkpoint.json" # ============================================================ # SETUP # ============================================================ genai.configure(api_key=GOOGLE_API_KEY) model = genai.GenerativeModel(GEMINI_MODEL) # ============================================================ # PROMPT # ============================================================ PARAPHRASE_PROMPT = """You are generating paraphrases for a research dataset. For each query below, generate exactly 5 diverse paraphrases. RULES: 1. Each paraphrase must ask the SAME factual question but with DIFFERENT wording 2. PRESERVE the year — it must appear in every paraphrase 3. PRESERVE entity names exactly (do NOT change names) 4. VARY the structure: use questions, fill-in-the-blank (with ___), statements, different word order 5. Keep paraphrases concise — similar length to the original 6. Do NOT add information not in the original 7. Make paraphrases genuinely diverse — don't just swap one word Example: Original: "In 2025, the head of government of the United Kingdom is ___" Paraphrases: 1. "Who served as the United Kingdom's head of government in 2025?" 2. "As of 2025, ___ held the position of head of government in the United Kingdom" 3. "The person leading the government of the United Kingdom in 2025 was ___" 4. "In the year 2025, who was the head of government of the United Kingdom?" 5. "Name the head of government of the United Kingdom for the year 2025" Return ONLY a JSON array of arrays. Each inner array has exactly 5 strings (the paraphrases). 
# ============================================================
# PARAPHRASE GENERATION
# ============================================================
lock = Lock()
results = {}      # sample_id -> list of 5 paraphrases
errors = []
total_done = 0
total_to_do = 0


def generate_batch(batch):
    """Generate paraphrases for a batch of queries."""
    sample_ids = [s["sample_id"] for s in batch]
    queries = [s["query"] for s in batch]

    prompt = f"""{PARAPHRASE_PROMPT}

Generate 5 paraphrases for each of these {len(queries)} queries:

{json.dumps(queries, indent=2)}"""

    try:
        response = model.generate_content(
            [{"role": "user", "parts": [prompt]}],
            generation_config=genai.types.GenerationConfig(
                temperature=0.7,  # some diversity
                max_output_tokens=8192,
            ),
        )
        text = response.text.strip()

        # Extract JSON
        if "```json" in text:
            text = text.split("```json")[1].split("```")[0].strip()
        elif "```" in text:
            text = text.split("```")[1].split("```")[0].strip()

        parsed = json.loads(text)

        # Validate structure
        if len(parsed) != len(sample_ids):
            # Try to salvage what we can
            result = {}
            for i, sid in enumerate(sample_ids):
                if i < len(parsed) and isinstance(parsed[i], list) and len(parsed[i]) == NUM_PARAPHRASES:
                    result[sid] = parsed[i]
                else:
                    # Generate simple mechanical paraphrases as fallback
                    result[sid] = _mechanical_paraphrases(queries[i])
            return result

        result = {}
        for sid, query, paras in zip(sample_ids, queries, parsed):
            if isinstance(paras, list) and len(paras) >= NUM_PARAPHRASES:
                # Validate each paraphrase contains the year
                year_match = re.search(r'\b(20\d{2})\b', query)
                year = year_match.group(1) if year_match else None
                valid = []
                for p in paras[:NUM_PARAPHRASES]:
                    if isinstance(p, str) and len(p) > 10:
                        if year and year not in p:
                            p = f"In {year}, {p}"  # force year if missing
                        valid.append(p)
                while len(valid) < NUM_PARAPHRASES:
                    valid.append(query)  # pad with original if needed
                result[sid] = valid[:NUM_PARAPHRASES]
            else:
                result[sid] = _mechanical_paraphrases(query)
        return result

    except Exception:
        # Fallback: mechanical paraphrases
        result = {}
        for sid, q in zip(sample_ids, queries):
            result[sid] = _mechanical_paraphrases(q)
        return result


def _mechanical_paraphrases(query):
    """Simple rule-based paraphrases as fallback."""
    year_match = re.search(r'\b(20\d{2})\b', query)
    year = year_match.group(1) if year_match else "2025"
    # Extract core content (remove "In YYYY, ")
    core = re.sub(r'^In \d{4},?\s*', '', query).strip()
    paras = [
        f"As of {year}, {core}",
        f"In the year {year}, {core}",
        f"For {year}, {core}",
        f"During {year}, {core}",
        query,  # original as last fallback
    ]
    return paras[:NUM_PARAPHRASES]
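# Example of the mechanical fallback output (illustrative), using the sample
# query from PARAPHRASE_PROMPT above:
#   _mechanical_paraphrases("In 2025, the head of government of the United Kingdom is ___")
#   -> ["As of 2025, the head of government of the United Kingdom is ___",
#       "In the year 2025, the head of government of the United Kingdom is ___",
#       "For 2025, the head of government of the United Kingdom is ___",
#       "During 2025, the head of government of the United Kingdom is ___",
#       "In 2025, the head of government of the United Kingdom is ___"]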
# ============================================================
# MAIN
# ============================================================
def main():
    global total_done, total_to_do

    pa = argparse.ArgumentParser()
    pa.add_argument("--resume", action="store_true", help="Resume from checkpoint")
    pa.add_argument("--max_workers", type=int, default=MAX_WORKERS)
    pa.add_argument("--batch_size", type=int, default=BATCH_SIZE)
    args = pa.parse_args()

    print("=" * 70)
    print(" GENERATE PARAPHRASES FOR ROBUSTNESS EXPERIMENT")
    print(f" Model: {GEMINI_MODEL}")
    print(f" Paraphrases per query: {NUM_PARAPHRASES}")
    print("=" * 70)

    # Load dataset
    print(f"\nLoading {TIER1_PATH}...")
    with open(TIER1_PATH) as f:
        tier1 = json.load(f)
    samples = tier1["samples"]
    print(f" Loaded: {len(samples)} samples")

    # Load checkpoint if resuming
    if args.resume and os.path.exists(CHECKPOINT_PATH):
        print(" Resuming from checkpoint...")
        with open(CHECKPOINT_PATH) as f:
            results.update(json.load(f))
        print(f" Loaded {len(results)} cached paraphrases")

    # Filter out already-done samples
    todo = [s for s in samples if s["sample_id"] not in results]
    total_to_do = len(todo)
    print(f" To generate: {total_to_do}")

    if total_to_do == 0:
        print(" All paraphrases already generated!")
    else:
        # Create batches
        batches = []
        for i in range(0, len(todo), args.batch_size):
            batches.append(todo[i:i + args.batch_size])
        print(f" Batches: {len(batches)} ({args.batch_size} queries each)")
        print(f" Workers: {args.max_workers}")
        print(f" Estimated time: ~{len(batches) * RATE_LIMIT_DELAY / 60 + len(batches) / args.max_workers * 2:.0f} minutes")
        print()

        # Process
        checkpoint_interval = 500  # save checkpoint every N samples
        with ThreadPoolExecutor(max_workers=args.max_workers) as executor:
            futures = {}
            for batch_idx, batch in enumerate(batches):
                future = executor.submit(generate_batch, batch)
                futures[future] = batch_idx
                time.sleep(RATE_LIMIT_DELAY)

            for future in as_completed(futures):
                batch_idx = futures[future]
                try:
                    batch_results = future.result()
                    with lock:
                        results.update(batch_results)
                        total_done += len(batch_results)
                        if total_done % 100 == 0 or total_done == total_to_do:
                            print(f" Progress: {total_done}/{total_to_do} ({100*total_done/total_to_do:.1f}%)")
                        # Checkpoint
                        if total_done % checkpoint_interval == 0:
                            with open(CHECKPOINT_PATH, "w") as f:
                                json.dump(results, f)
                            print(f" 💾 Checkpoint saved ({len(results)} paraphrases)")
                except Exception as e:
                    errors.append((batch_idx, str(e)))
                    # Use mechanical fallback
                    batch = batches[batch_idx]
                    with lock:
                        for s in batch:
                            results[s["sample_id"]] = _mechanical_paraphrases(s["query"])
                        total_done += len(batch)

        print(f"\n Done. Generated: {len(results)}, Errors: {len(errors)}")

        # Save final checkpoint
        with open(CHECKPOINT_PATH, "w") as f:
            json.dump(results, f)

    # ============================================================
    # BUILD PARAPHRASED DATASET
    # ============================================================
    print("\nBuilding paraphrased dataset...")
    paraphrased_samples = []
    stats = {"original": 0, "paraphrased": 0, "missing": 0}

    for s in samples:
        sid = s["sample_id"]

        # Add original as version 0
        s_orig = dict(s)
        s_orig["para_version"] = 0
        s_orig["para_group"] = sid  # group ID to link paraphrases of same fact
        s_orig["is_paraphrase"] = False
        paraphrased_samples.append(s_orig)
        stats["original"] += 1

        # Add paraphrases as versions 1-5
        paras = results.get(sid, [])
        if not paras:
            stats["missing"] += 1
            continue
        for v, para_query in enumerate(paras, 1):
            s_para = dict(s)
            s_para["sample_id"] = f"{sid}_p{v}"
            s_para["query"] = para_query
            s_para["query_original"] = s["query"]
            s_para["para_version"] = v
            s_para["para_group"] = sid
            s_para["is_paraphrase"] = True
            paraphrased_samples.append(s_para)
            stats["paraphrased"] += 1

    print(f" Original samples: {stats['original']}")
    print(f" Paraphrased samples: {stats['paraphrased']}")
    print(f" Missing paraphrases: {stats['missing']}")
    print(f" Total: {len(paraphrased_samples)}")
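    # Optional sanity check (illustrative addition, not part of the original flow):
    # a complete group is one original plus NUM_PARAPHRASES paraphrase variants.
    group_sizes = Counter(s["para_group"] for s in paraphrased_samples)
    n_complete = sum(1 for c in group_sizes.values() if c == NUM_PARAPHRASES + 1)
    print(f" Complete groups: {n_complete}/{len(group_sizes)}")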
    # ============================================================
    # SHOW EXAMPLES
    # ============================================================
    print("\nExamples:")
    shown = 0
    for s in samples[:50]:
        sid = s["sample_id"]
        paras = results.get(sid, [])
        if paras and paras[0] != s["query"]:
            print(f"\n Original: {s['query'][:80]}")
            for i, p in enumerate(paras, 1):
                print(f" Para {i}: {p[:80]}")
            shown += 1
            if shown >= 5:
                break

    # ============================================================
    # SAVE MASTER PARAPHRASED DATASET
    # ============================================================
    print(f"\nSaving paraphrased dataset...")
    para_dataset = {
        "metadata": {
            "name": "Knowledge Drift - Tier 1 with Paraphrases",
            "version": "3.0",
            "created": datetime.now().isoformat(),
            "description": f"Each fact has {NUM_PARAPHRASES} paraphrases for robustness testing",
            "original_samples": stats["original"],
            "paraphrased_samples": stats["paraphrased"],
            "total_samples": len(paraphrased_samples),
            "paraphrases_per_fact": NUM_PARAPHRASES,
        },
        "samples": paraphrased_samples,
    }
    with open(OUTPUT_PATH, "w") as f:
        json.dump(para_dataset, f, indent=2, ensure_ascii=False)
    print(f" Saved: {OUTPUT_PATH} ({len(paraphrased_samples)} samples)")

    # ============================================================
    # CREATE PER-MODEL PARAPHRASED DATASETS
    # ============================================================
    print("\nCreating per-model paraphrased datasets...")
    MODELS = {
        "llama2": "is_drifted_llama2",
        "mistral": "is_drifted_mistral",
        "llama31": "is_drifted_llama31",
        "qwen25": "is_drifted_qwen25",
        "gemma2": "is_drifted_gemma2",
    }
    for model_name, drift_key in MODELS.items():
        model_samples = []
        for s in paraphrased_samples:
            ms = dict(s)
            ms["is_drifted_query"] = s.get(drift_key, False)
            ms["temporal_zone"] = "post_cutoff"
            model_samples.append(ms)
        model_dataset = {
            "metadata": para_dataset["metadata"].copy(),
            "samples": model_samples,
        }
        path = f"data/tier1_{model_name}_paraphrased.json"
        with open(path, "w") as f:
            json.dump(model_dataset, f, indent=2, ensure_ascii=False)
        n_d = sum(1 for s in model_samples if s["is_drifted_query"])
        n_orig = sum(1 for s in model_samples if not s["is_paraphrase"])
        n_para = sum(1 for s in model_samples if s["is_paraphrase"])
        print(f" {model_name:10s}: {path}")
        print(f"   {n_orig} originals + {n_para} paraphrases, {n_d} drifted")

    # ============================================================
    # CREATE PARAPHRASE-ONLY DATASETS (for test-only evaluation)
    # ============================================================
    print("\nCreating paraphrase-only datasets (for testing probe trained on originals)...")
    for model_name, drift_key in MODELS.items():
        para_only = []
        for s in paraphrased_samples:
            if s["is_paraphrase"]:
                ms = dict(s)
                ms["is_drifted_query"] = s.get(drift_key, False)
                ms["temporal_zone"] = "post_cutoff"
                para_only.append(ms)
        path = f"data/tier1_{model_name}_paraonly.json"
        with open(path, "w") as f:
            json.dump({"samples": para_only}, f, indent=2, ensure_ascii=False)
        n_d = sum(1 for s in para_only if s["is_drifted_query"])
        print(f" {model_name:10s}: {path} ({len(para_only)} samples, {n_d} drifted)")

    # ============================================================
    # DONE
    # ============================================================
    print(f"""
{'=' * 70}
 DONE — PARAPHRASED DATASETS READY
{'=' * 70}
 Master:     {OUTPUT_PATH} ({len(paraphrased_samples)} samples)
 Per-model:  data/tier1_<model>_paraphrased.json (originals + paraphrases)
 Test-only:  data/tier1_<model>_paraonly.json (paraphrases only)

 EXPERIMENT WORKFLOW:
 1. ROBUSTNESS TEST (probe trained on originals):
    - Train probe on data/tier1_<model>.json (originals only)
    - Test on data/tier1_<model>_paraonly.json
    - Report AUROC drop (if any)
 2. AUGMENTED TRAINING:
    - Train probe on data/tier1_<model>_paraphrased.json
    - Test on held-out paraphrases (use para_version for splitting)
    - Report recovered AUROC
 3. REVERSE TRANSFER:
    - Train probe on paraphrases only
    - Test on originals
    - Report AUROC
{'=' * 70}
""")


if __name__ == "__main__":
    main()
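

# ============================================================
# EXAMPLE: HELD-OUT PARAPHRASE SPLIT (illustrative, not invoked)
# ============================================================
# A minimal sketch of the held-out paraphrase split described in workflow
# step 2 above. Holding out versions 4 and 5 is an arbitrary choice for
# illustration; downstream probe code may split differently.
def split_by_para_version(paraphrased_path, test_versions=(4, 5)):
    """Split a paraphrased dataset so the given paraphrase versions are held out."""
    with open(paraphrased_path) as f:
        para_samples = json.load(f)["samples"]
    train = [s for s in para_samples if s["para_version"] not in test_versions]
    test = [s for s in para_samples if s["para_version"] in test_versions]
    return train, test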