Raniahossam33 commited on
Commit
14b2318
·
verified ·
1 Parent(s): dd5a648

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +27 -0
  2. analyze_drift_signals.py +533 -0
  3. analyze_single.py +1235 -0
  4. collect_all.py +409 -0
  5. collectors/__init__.py +0 -0
  6. collectors/static_collector.py +324 -0
  7. collectors/wikidata_collector.py +359 -0
  8. collectors/wikipedia_collector.py +165 -0
  9. configs/__init__.py +0 -0
  10. configs/config.py +530 -0
  11. convert_sparql_to_samples.py +60 -0
  12. cross_model.py +856 -0
  13. data/_paraphrase_checkpoint.json +0 -0
  14. data/experiments/attention_analysis/attention_raw.json +0 -0
  15. data/experiments/attention_analysis/attention_summary.json +270 -0
  16. data/experiments/disentanglement/cached_states.npz +3 -0
  17. data/experiments/disentanglement/disentanglement_results.json +331 -0
  18. data/experiments/disentanglement/fig1_disentanglement_overview.png +3 -0
  19. data/experiments/disentanglement/fig2_pca_visualization.png +3 -0
  20. data/experiments/disentanglement/fig3_drift_vs_uncertainty_directions.png +3 -0
  21. data/experiments/disentanglement/fig4_2x2_cell_analysis.png +3 -0
  22. data/experiments/disentanglement_llama31/cached_states.npz +3 -0
  23. data/experiments/drift_neurons/drift_neuron_results.json +8 -0
  24. data/experiments/entropy_analysis/layer_signals_for_plotting.json +490 -0
  25. data/experiments/entropy_analysis/raw_results.json +0 -0
  26. data/experiments/entropy_analysis/signal_summary.json +522 -0
  27. data/experiments/tier1_gemma2_v2/all_layer_results.json +0 -0
  28. data/experiments/tier1_gemma2_v2/cached_states.npz +3 -0
  29. data/experiments/tier1_gemma2_v2/figures/fig1_dashboard.png +3 -0
  30. data/experiments/tier1_gemma2_v2/figures/fig2_per_relation.png +3 -0
  31. data/experiments/tier1_gemma2_v2/figures/fig3_2x2_cells.png +3 -0
  32. data/experiments/tier1_gemma2_v2/figures/fig4_pca_projections.png +3 -0
  33. data/experiments/tier1_gemma2_v2/figures/fig5_cosine_matrix.png +3 -0
  34. data/experiments/tier1_gemma2_v2/figures/fig6_sparsity_tradeoff.png +3 -0
  35. data/experiments/tier1_gemma2_v2/figures/fig7_neuron_overlap.png +3 -0
  36. data/experiments/tier1_gemma2_v2/figures/fig8_weight_distributions.png +3 -0
  37. data/experiments/tier1_gemma2_v2/final_results.json +0 -0
  38. data/experiments/tier1_gemma2_v2/per_layer/layer_00.json +92 -0
  39. data/experiments/tier1_gemma2_v2/per_layer/layer_01.json +92 -0
  40. data/experiments/tier1_gemma2_v2/per_layer/layer_02.json +92 -0
  41. data/experiments/tier1_gemma2_v2/per_layer/layer_03.json +92 -0
  42. data/experiments/tier1_gemma2_v2/per_layer/layer_04.json +92 -0
  43. data/experiments/tier1_gemma2_v2/per_layer/layer_05.json +92 -0
  44. data/experiments/tier1_gemma2_v2/per_layer/layer_06.json +92 -0
  45. data/experiments/tier1_gemma2_v2/per_layer/layer_07.json +92 -0
  46. data/experiments/tier1_gemma2_v2/per_layer/layer_08.json +92 -0
  47. data/experiments/tier1_gemma2_v2/per_layer/layer_09.json +92 -0
  48. data/experiments/tier1_gemma2_v2/per_layer/layer_10.json +92 -0
  49. data/experiments/tier1_gemma2_v2/per_layer/layer_11.json +92 -0
  50. data/experiments/tier1_gemma2_v2/per_layer/layer_12.json +92 -0
.gitattributes CHANGED
@@ -58,3 +58,30 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ data/external/sparql_extension_samples.json filter=lfs diff=lfs merge=lfs -text
62
+ data/external/templama_converted.json filter=lfs diff=lfs merge=lfs -text
63
+ data/external/templama_test.json filter=lfs diff=lfs merge=lfs -text
64
+ data/external/temporal-robustness/data/dynamic-templama/dataset_from_2019-1-1_to_2022-12-31_per_quarter/test.jsonl filter=lfs diff=lfs merge=lfs -text
65
+ data/external/temporal-robustness/data/dynamic-templama/dataset_from_2019-1-1_to_2022-6-31_per_quarter/test.jsonl filter=lfs diff=lfs merge=lfs -text
66
+ data/external/temporal-robustness/data/dynamic-templama/dataset_from_2019-1-1_to_2022-6-31_per_quarter_improved/test.jsonl filter=lfs diff=lfs merge=lfs -text
67
+ data/external/temporal-robustness/dynamic-templama-data-analysis.ipynb filter=lfs diff=lfs merge=lfs -text
68
+ data/knowledge_drift_dataset_multimodel.json filter=lfs diff=lfs merge=lfs -text
69
+ data/knowledge_drift_unified_full.json filter=lfs diff=lfs merge=lfs -text
70
+ data/knowledge_drift_unified_tier1.json filter=lfs diff=lfs merge=lfs -text
71
+ data/knowledge_drift_unified_tier1_clean.json filter=lfs diff=lfs merge=lfs -text
72
+ data/tier1_gemma2.json filter=lfs diff=lfs merge=lfs -text
73
+ data/tier1_gemma2_paraonly.json filter=lfs diff=lfs merge=lfs -text
74
+ data/tier1_gemma2_paraphrased.json filter=lfs diff=lfs merge=lfs -text
75
+ data/tier1_llama2.json filter=lfs diff=lfs merge=lfs -text
76
+ data/tier1_llama2_paraonly.json filter=lfs diff=lfs merge=lfs -text
77
+ data/tier1_llama2_paraphrased.json filter=lfs diff=lfs merge=lfs -text
78
+ data/tier1_llama31.json filter=lfs diff=lfs merge=lfs -text
79
+ data/tier1_llama31_paraonly.json filter=lfs diff=lfs merge=lfs -text
80
+ data/tier1_llama31_paraphrased.json filter=lfs diff=lfs merge=lfs -text
81
+ data/tier1_mistral.json filter=lfs diff=lfs merge=lfs -text
82
+ data/tier1_mistral_paraonly.json filter=lfs diff=lfs merge=lfs -text
83
+ data/tier1_mistral_paraphrased.json filter=lfs diff=lfs merge=lfs -text
84
+ data/tier1_paraphrased.json filter=lfs diff=lfs merge=lfs -text
85
+ data/tier1_qwen25.json filter=lfs diff=lfs merge=lfs -text
86
+ data/tier1_qwen25_paraonly.json filter=lfs diff=lfs merge=lfs -text
87
+ data/tier1_qwen25_paraphrased.json filter=lfs diff=lfs merge=lfs -text
analyze_drift_signals.py ADDED
@@ -0,0 +1,533 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Knowledge Drift Internal Signal Analysis
3
+ ==========================================
4
+ Runs queries through a model and measures internal confidence signals
5
+ to see if the model "knows it doesn't know" for drifted facts.
6
+
7
+ Metrics computed per query:
8
+ 1. Output entropy (how uncertain is the final prediction?)
9
+ 2. Top-1 probability (how confident in the best answer?)
10
+ 3. Top-5 token probabilities (is the model spread across options?)
11
+ 4. Logit lens per layer (what does the model predict at each layer?)
12
+ 5. Layer-wise entropy (does uncertainty build up or resolve across layers?)
13
+ 6. Old vs New answer probability (does the model consider both?)
14
+
15
+ Usage:
16
+ python analyze_drift_signals.py \
17
+ --model Qwen/Qwen2.5-7B-Instruct \
18
+ --dataset data/knowledge_drift_dataset.json \
19
+ --output data/drift_analysis/ \
20
+ --max_samples 500
21
+ """
22
+
23
+ import argparse
24
+ import json
25
+ import os
26
+ import logging
27
+ import torch
28
+ import torch.nn.functional as F
29
+ import numpy as np
30
+ from tqdm import tqdm
31
+ from collections import defaultdict
32
+ from datetime import datetime
33
+
34
# Module-wide logging: timestamped INFO-level messages (format: "time - LEVEL - msg").
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
36
+
37
+
38
def load_model(model_name, device="auto"):
    """Load a causal LM and its tokenizer for internal-state inspection.

    The model is returned in eval mode, fp16, with hidden-state output
    enabled so downstream logit-lens analysis can read every layer.

    Returns:
        (model, tokenizer) tuple.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer

    logger.info(f"Loading model: {model_name}")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        # Some tokenizers ship without a pad token; reuse EOS so padding works.
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        device_map=device,
        trust_remote_code=True,
        output_hidden_states=True,
    )
    model.eval()

    cfg = model.config
    logger.info(f"Model loaded: {cfg.num_hidden_layers} layers, {cfg.hidden_size} hidden dim, {cfg.vocab_size} vocab")

    return model, tokenizer
62
+
63
+
64
def compute_entropy(logits):
    """Return the Shannon entropy (in nats) of softmax(logits) as a float."""
    p = F.softmax(logits, dim=-1)
    log_p = F.log_softmax(logits, dim=-1)
    ent = -(p * log_p).sum(dim=-1)
    return ent.item()
70
+
71
+
72
def compute_top_k_probs(logits, tokenizer, k=10):
    """Return the k most probable next tokens, ordered by descending probability.

    Each entry is a dict: {"token": decoded str, "probability": float, "token_id": int}.
    """
    top_probs, top_indices = F.softmax(logits, dim=-1).topk(k)
    return [
        {
            "token": tokenizer.decode(idx.item()),
            "probability": p.item(),
            "token_id": idx.item(),
        }
        for p, idx in zip(top_probs, top_indices)
    ]
84
+
85
+
86
def get_answer_token_ids(tokenizer, answer_text):
    """Return the first token id of `answer_text`, or None if it tokenizes to nothing."""
    token_ids = tokenizer.encode(answer_text, add_special_tokens=False)
    # Only the leading token is tracked downstream (rank/probability lookups).
    return token_ids[0] if token_ids else None
93
+
94
+
95
def analyze_single_query(model, tokenizer, query, expected_answer="", old_answer="", device="cuda"):
    """
    Run a single query and collect all internal signals.

    Returns a dict with:
    - output_entropy: entropy of final layer output distribution
    - top1_prob: probability of the most likely next token
    - top10_tokens: top 10 predicted tokens with probabilities
    - layer_entropies: entropy at each layer (via logit lens)
    - layer_top1_tokens: what each layer predicts as top-1
    - layer_top1_probs: confidence of each layer's top prediction
    - expected_answer_rank: rank of expected answer token in final output
    - expected_answer_prob: probability of expected answer token
    - old_answer_rank: rank of old/outdated answer token (for drifted queries)
    - old_answer_prob: probability of old answer token
    """
    # Format as simple question
    # NOTE(review): this hard-codes the ChatML markup (<|im_start|>...) used by Qwen;
    # other model families expect different chat templates — confirm before reuse.
    prompt = f"<|im_start|>system\nAnswer the following question concisely in a few words.<|im_end|>\n<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"

    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    # Single forward pass; hidden states for every layer are kept for the logit lens.
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)

    # === FINAL LAYER ANALYSIS ===
    # Next-token distribution at the last prompt position.
    final_logits = outputs.logits[0, -1, :]  # [vocab_size]

    output_entropy = compute_entropy(final_logits)
    top10 = compute_top_k_probs(final_logits, tokenizer, k=10)
    top1_prob = top10[0]["probability"]
    top1_token = top10[0]["token"]

    # === ANSWER TOKEN TRACKING ===
    # Only the FIRST token of each answer string is tracked (see get_answer_token_ids).
    expected_token_id = get_answer_token_ids(tokenizer, expected_answer) if expected_answer else None
    old_token_id = get_answer_token_ids(tokenizer, old_answer) if old_answer else None

    final_probs = F.softmax(final_logits, dim=-1)
    sorted_indices = final_probs.argsort(descending=True)

    # Rank -1 means "not tracked" (no answer string, or token not found).
    expected_answer_prob = 0.0
    expected_answer_rank = -1
    if expected_token_id is not None:
        expected_answer_prob = final_probs[expected_token_id].item()
        # Position of the answer token in the probability-sorted vocabulary (0 = top-1).
        rank_mask = (sorted_indices == expected_token_id).nonzero(as_tuple=True)[0]
        expected_answer_rank = rank_mask[0].item() if len(rank_mask) > 0 else -1

    old_answer_prob = 0.0
    old_answer_rank = -1
    if old_token_id is not None:
        old_answer_prob = final_probs[old_token_id].item()
        rank_mask = (sorted_indices == old_token_id).nonzero(as_tuple=True)[0]
        old_answer_rank = rank_mask[0].item() if len(rank_mask) > 0 else -1

    # === LAYER-BY-LAYER ANALYSIS (Logit Lens) ===
    # Project each layer's hidden state into vocabulary space
    # using the model's unembedding matrix (lm_head)
    hidden_states = outputs.hidden_states  # tuple of (num_layers + 1) tensors
    num_layers = len(hidden_states) - 1  # exclude embedding layer

    # Get the unembedding matrix
    # Architectures differ in where lm_head lives; fall back to None (skips the lens).
    if hasattr(model, 'lm_head'):
        unembed = model.lm_head
    elif hasattr(model, 'model') and hasattr(model.model, 'lm_head'):
        unembed = model.model.lm_head
    else:
        unembed = None

    layer_entropies = []
    layer_top1_tokens = []
    layer_top1_probs = []
    layer_expected_probs = []
    layer_old_probs = []

    if unembed is not None:
        # Also need the layer norm before unembedding
        # Llama-style models expose model.norm; GPT-2-style expose transformer.ln_f.
        if hasattr(model, 'model') and hasattr(model.model, 'norm'):
            final_norm = model.model.norm
        elif hasattr(model, 'transformer') and hasattr(model.transformer, 'ln_f'):
            final_norm = model.transformer.ln_f
        else:
            final_norm = None

        for layer_idx in range(num_layers):
            h = hidden_states[layer_idx + 1][0, -1, :]  # [hidden_dim], last token

            # Apply final layer norm if available (important for logit lens accuracy)
            if final_norm is not None:
                h_normed = final_norm(h.unsqueeze(0)).squeeze(0)
            else:
                h_normed = h

            # Project to vocabulary
            # Cast to the head's dtype (model runs in fp16; see load_model).
            layer_logits = unembed(h_normed.unsqueeze(0).to(unembed.weight.dtype)).squeeze(0)

            # Entropy at this layer
            layer_ent = compute_entropy(layer_logits)
            layer_entropies.append(layer_ent)

            # Top prediction at this layer
            layer_probs = F.softmax(layer_logits, dim=-1)
            top_prob, top_idx = layer_probs.max(dim=-1)
            layer_top1_tokens.append(tokenizer.decode(top_idx.item()))
            layer_top1_probs.append(top_prob.item())

            # Track expected and old answer probability at each layer
            if expected_token_id is not None:
                layer_expected_probs.append(layer_probs[expected_token_id].item())
            else:
                layer_expected_probs.append(0.0)

            if old_token_id is not None:
                layer_old_probs.append(layer_probs[old_token_id].item())
            else:
                layer_old_probs.append(0.0)

    # === ALSO GENERATE THE FULL ANSWER ===
    # Greedy decoding (do_sample=False) so the text answer is deterministic.
    with torch.no_grad():
        gen_outputs = model.generate(
            **inputs,
            max_new_tokens=50,
            do_sample=False,
        )
    # Strip the prompt tokens; keep only the newly generated continuation.
    generated_text = tokenizer.decode(gen_outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)

    return {
        # Final output signals
        "output_entropy": output_entropy,
        "top1_probability": top1_prob,
        "top1_token": top1_token,
        "top10_tokens": top10,
        "generated_answer": generated_text.strip(),

        # Answer tracking
        "expected_answer_prob": expected_answer_prob,
        "expected_answer_rank": expected_answer_rank,
        "old_answer_prob": old_answer_prob,
        "old_answer_rank": old_answer_rank,

        # Layer-by-layer signals
        "layer_entropies": layer_entropies,
        "layer_top1_tokens": layer_top1_tokens,
        "layer_top1_probs": layer_top1_probs,
        "layer_expected_answer_probs": layer_expected_probs,
        "layer_old_answer_probs": layer_old_probs,
        "num_layers": num_layers,
    }
242
+
243
+
244
def run_analysis(model, tokenizer, samples, output_dir, device="cuda", max_samples=None):
    """Run analysis on all samples and aggregate results.

    Analyzes each sample with analyze_single_query, groups signals by a
    drift/stability category label, prints aggregate tables and comparisons,
    and writes three JSON files to output_dir:
      raw_results.json, signal_summary.json, layer_signals_for_plotting.json.

    Returns the per-group summary dict (also written to signal_summary.json).
    """
    os.makedirs(output_dir, exist_ok=True)

    if max_samples and len(samples) > max_samples:
        # Balanced sampling: ensure we get drifted + non-drifted
        import random
        random.seed(42)  # fixed seed for reproducible subsets
        drifted = [s for s in samples if s.get("is_drifted_query")]
        not_drifted = [s for s in samples if not s.get("is_drifted_query")]
        # Target roughly one third drifted samples, at least one.
        n_drifted = min(len(drifted), max(max_samples // 3, 1))
        n_other = min(len(not_drifted), max_samples - n_drifted)
        if drifted:
            samples = random.sample(drifted, n_drifted) + random.sample(not_drifted, n_other)
        else:
            samples = random.sample(not_drifted, min(len(not_drifted), max_samples))
        random.shuffle(samples)
        # NOTE(review): when `drifted` is empty, this log line still reports
        # n_drifted/n_other from the unused branch — counts may be misleading.
        logger.info(f"Balanced sample: {n_drifted} drifted + {n_other} non-drifted = {len(samples)}")

    all_results = []

    # Group results by category for comparison
    # defaultdict factory so any new group label gets fresh empty signal lists.
    category_signals = defaultdict(lambda: {
        "entropies": [], "top1_probs": [], "expected_probs": [],
        "old_probs": [], "expected_ranks": [],
        "layer_entropies": [], "layer_top1_probs": [],
        "layer_expected_probs": [], "layer_old_probs": [],
    })

    logger.info(f"Analyzing {len(samples)} samples...")

    for sample in tqdm(samples, desc="Analyzing queries"):
        query = sample["query"]
        expected = sample.get("expected_answer", "")
        old = sample.get("model_likely_answer", "")
        category = sample.get("category", "unknown")
        is_drifted = sample.get("is_drifted_query", False)
        year = sample.get("year", 0)

        # Only analyze 2025 (post-cutoff) queries for the main comparison
        # But also include some pre-cutoff for baseline
        try:
            result = analyze_single_query(
                model, tokenizer, query,
                expected_answer=expected,
                old_answer=old,
                device=device,
            )
        except Exception as e:
            # Best-effort: log the failure and skip the sample rather than abort the run.
            logger.error(f"Error analyzing query: {e}")
            continue

        # Add metadata
        result["query"] = query
        result["expected_answer"] = expected
        result["model_likely_answer"] = old
        result["category"] = category
        result["is_drifted_query"] = is_drifted
        result["year"] = year
        result["knowledge_type"] = sample.get("knowledge_type", "")
        result["temporal_zone"] = sample.get("temporal_zone", "")
        result["entity"] = sample.get("entity", "")

        all_results.append(result)

        # Aggregate by category
        # Use fine-grained grouping: category + temporal_zone
        if is_drifted:
            group = "DRIFTED (post-cutoff, answer changed)"
        elif category == "stable":
            if sample.get("temporal_zone") == "post_cutoff":
                group = "STABLE (post-cutoff, never changes)"
            else:
                group = "STABLE (pre-cutoff, never changes)"
        elif category == "no_drift":
            if sample.get("temporal_zone") == "post_cutoff":
                group = "NO_DRIFT (post-cutoff, could change but didn't)"
            else:
                group = "NO_DRIFT (pre-cutoff)"
        elif category == "known_drift":
            group = "KNOWN_DRIFT (pre-cutoff, model knows change)"
        else:
            # Fallback bucket for unrecognized categories.
            group = f"{category}_{sample.get('temporal_zone', 'unknown')}"

        signals = category_signals[group]
        signals["entropies"].append(result["output_entropy"])
        signals["top1_probs"].append(result["top1_probability"])
        signals["expected_probs"].append(result["expected_answer_prob"])
        signals["old_probs"].append(result["old_answer_prob"])
        signals["expected_ranks"].append(result["expected_answer_rank"])
        # Layer lists are empty when no unembedding head was found (see analyze_single_query).
        if result["layer_entropies"]:
            signals["layer_entropies"].append(result["layer_entropies"])
            signals["layer_top1_probs"].append(result["layer_top1_probs"])
            signals["layer_expected_probs"].append(result["layer_expected_answer_probs"])
            signals["layer_old_probs"].append(result["layer_old_answer_probs"])

    # === SAVE RAW RESULTS ===
    # Remove non-serializable items for JSON
    serializable_results = []
    for r in all_results:
        sr = {k: v for k, v in r.items()}
        serializable_results.append(sr)

    raw_path = os.path.join(output_dir, "raw_results.json")
    with open(raw_path, 'w', encoding='utf-8') as f:
        # default=str stringifies anything json can't encode natively.
        json.dump(serializable_results, f, indent=2, ensure_ascii=False, default=str)
    logger.info(f"Saved raw results to {raw_path}")

    # === COMPUTE AND PRINT AGGREGATE STATISTICS ===
    print("\n" + "=" * 80)
    print("  KNOWLEDGE DRIFT INTERNAL SIGNAL ANALYSIS")
    print("=" * 80)

    # Header
    print(f"\n{'Group':<55} {'Count':>6} {'Entropy':>10} {'Top1 Prob':>10} {'Exp Prob':>10} {'Exp Rank':>10}")
    print("-" * 105)

    summary = {}
    for group in sorted(category_signals.keys()):
        signals = category_signals[group]
        n = len(signals["entropies"])
        if n == 0:
            continue

        avg_entropy = np.mean(signals["entropies"])
        avg_top1 = np.mean(signals["top1_probs"])
        avg_exp_prob = np.mean(signals["expected_probs"])
        # Rank -1 means "not tracked"; exclude from the average.
        valid_ranks = [r for r in signals["expected_ranks"] if r >= 0]
        avg_rank = np.mean(valid_ranks) if valid_ranks else -1

        print(f"{group:<55} {n:>6} {avg_entropy:>10.4f} {avg_top1:>10.4f} {avg_exp_prob:>10.4f} {avg_rank:>10.1f}")

        summary[group] = {
            "count": n,
            "avg_entropy": float(avg_entropy),
            "std_entropy": float(np.std(signals["entropies"])),
            "avg_top1_prob": float(avg_top1),
            "std_top1_prob": float(np.std(signals["top1_probs"])),
            "avg_expected_answer_prob": float(avg_exp_prob),
            "avg_expected_answer_rank": float(avg_rank),
            "avg_old_answer_prob": float(np.mean(signals["old_probs"])) if signals["old_probs"] else 0,
        }

        # Compute layer-wise averages
        # Assumes every sample in a group has the same layer count — TODO confirm
        # for mixed-model runs; np.array would otherwise produce a ragged array.
        if signals["layer_entropies"]:
            layer_ent_array = np.array(signals["layer_entropies"])
            layer_top1_array = np.array(signals["layer_top1_probs"])
            layer_exp_array = np.array(signals["layer_expected_probs"])
            layer_old_array = np.array(signals["layer_old_probs"])

            summary[group]["layer_avg_entropies"] = layer_ent_array.mean(axis=0).tolist()
            summary[group]["layer_avg_top1_probs"] = layer_top1_array.mean(axis=0).tolist()
            summary[group]["layer_avg_expected_probs"] = layer_exp_array.mean(axis=0).tolist()
            summary[group]["layer_avg_old_probs"] = layer_old_array.mean(axis=0).tolist()

    # === KEY COMPARISONS ===
    print("\n" + "=" * 80)
    print("  KEY COMPARISONS")
    print("=" * 80)

    # Group labels must match the grouping logic in the loop above exactly.
    drifted_key = "DRIFTED (post-cutoff, answer changed)"
    nodrift_key = "NO_DRIFT (post-cutoff, could change but didn't)"
    stable_key = "STABLE (post-cutoff, never changes)"

    if drifted_key in summary and nodrift_key in summary:
        d = summary[drifted_key]
        nd = summary[nodrift_key]

        print(f"\n 🔥 DRIFTED vs NO_DRIFT (both post-cutoff, same fact types):")
        print(f"   Entropy:    DRIFTED={d['avg_entropy']:.4f} vs NO_DRIFT={nd['avg_entropy']:.4f} "
              f"(Δ={d['avg_entropy'] - nd['avg_entropy']:+.4f})")
        print(f"   Top1 Prob:  DRIFTED={d['avg_top1_prob']:.4f} vs NO_DRIFT={nd['avg_top1_prob']:.4f} "
              f"(Δ={d['avg_top1_prob'] - nd['avg_top1_prob']:+.4f})")
        print(f"   Exp Answer: DRIFTED={d['avg_expected_answer_prob']:.4f} vs NO_DRIFT={nd['avg_expected_answer_prob']:.4f}")

        if d['avg_entropy'] > nd['avg_entropy']:
            print(f"\n ✅ Model is MORE uncertain on drifted facts (higher entropy)")
        else:
            print(f"\n ⚠️ Model is NOT more uncertain on drifted facts")

        if d['avg_top1_prob'] < nd['avg_top1_prob']:
            print(f" ✅ Model is LESS confident on drifted facts (lower top-1 prob)")
        else:
            print(f" ⚠️ Model is NOT less confident on drifted facts")

    if drifted_key in summary and stable_key in summary:
        d = summary[drifted_key]
        s = summary[stable_key]
        print(f"\n 🔥 DRIFTED vs STABLE (post-cutoff):")
        print(f"   Entropy:    DRIFTED={d['avg_entropy']:.4f} vs STABLE={s['avg_entropy']:.4f} "
              f"(Δ={d['avg_entropy'] - s['avg_entropy']:+.4f})")
        print(f"   Top1 Prob:  DRIFTED={d['avg_top1_prob']:.4f} vs STABLE={s['avg_top1_prob']:.4f} "
              f"(Δ={d['avg_top1_prob'] - s['avg_top1_prob']:+.4f})")

    # === SAVE SUMMARY ===
    summary_path = os.path.join(output_dir, "signal_summary.json")
    with open(summary_path, 'w') as f:
        json.dump(summary, f, indent=2)
    logger.info(f"Saved summary to {summary_path}")

    # === SAVE LAYER-WISE DATA FOR PLOTTING ===
    # Subset of summary containing only groups that have layer-wise averages.
    plot_data_path = os.path.join(output_dir, "layer_signals_for_plotting.json")
    plot_data = {}
    for group, data in summary.items():
        if "layer_avg_entropies" in data:
            plot_data[group] = {
                "layer_entropies": data["layer_avg_entropies"],
                "layer_top1_probs": data["layer_avg_top1_probs"],
                "layer_expected_probs": data["layer_avg_expected_probs"],
                "layer_old_probs": data["layer_avg_old_probs"],
            }

    with open(plot_data_path, 'w') as f:
        json.dump(plot_data, f, indent=2)
    logger.info(f"Saved plot data to {plot_data_path}")

    # === PRINT EXAMPLE DRIFTED QUERIES ===
    print("\n" + "=" * 80)
    print("  EXAMPLE DRIFTED QUERIES (model answers vs ground truth)")
    print("=" * 80)

    # Show at most 10 drifted examples for a quick qualitative check.
    drifted_examples = [r for r in all_results if r.get("is_drifted_query")][:10]
    for ex in drifted_examples:
        print(f"\n Q: {ex['query']}")
        print(f"   Model says: '{ex['generated_answer']}'")
        print(f"   Should be:  '{ex['expected_answer']}'")
        print(f"   Old answer: '{ex['model_likely_answer']}'")
        print(f"   Entropy: {ex['output_entropy']:.4f} | Top1 prob: {ex['top1_probability']:.4f}")
        top3 = ', '.join(f"{t['token']}({t['probability']:.3f})" for t in ex['top10_tokens'][:3])
        print(f"   Top 3 tokens: {top3}")

    print("\n" + "=" * 80)

    return summary
478
+
479
+
480
def main():
    """CLI entry point: load dataset, optionally subsample, run the signal analysis."""
    parser = argparse.ArgumentParser(description="Analyze internal drift signals")
    parser.add_argument("--model", type=str, default="Qwen/Qwen2.5-7B-Instruct")
    parser.add_argument("--dataset", type=str, default="data/knowledge_drift_dataset.json")
    parser.add_argument("--output", type=str, default="data/drift_analysis/")
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--device", type=str, default="auto")
    parser.add_argument("--post_cutoff_only", action="store_true",
                        help="Only analyze post-cutoff (2025) queries for speed")

    args = parser.parse_args()

    # Load dataset
    logger.info(f"Loading dataset from {args.dataset}")
    with open(args.dataset, 'r', encoding='utf-8') as f:
        dataset = json.load(f)

    # Expects a top-level {"samples": [...]} layout.
    samples = dataset["samples"]

    # Filter to post-cutoff if requested (much faster, focuses on the key comparison)
    if args.post_cutoff_only:
        samples = [s for s in samples if s.get("temporal_zone") == "post_cutoff"]
        logger.info(f"Filtered to {len(samples)} post-cutoff samples")

    if args.max_samples:
        # Smart sampling: ensure we get a balanced mix
        # (~1/3 drifted, remainder non-drifted; mirrors run_analysis's own balancing).
        drifted = [s for s in samples if s.get("is_drifted_query")]
        not_drifted = [s for s in samples if not s.get("is_drifted_query")]

        n_drifted = min(len(drifted), args.max_samples // 3)
        n_other = min(len(not_drifted), args.max_samples - n_drifted)

        import random
        random.seed(42)  # fixed seed for reproducible subsets
        samples = random.sample(drifted, n_drifted) + random.sample(not_drifted, n_other)
        random.shuffle(samples)
        logger.info(f"Sampled {len(samples)} samples ({n_drifted} drifted, {n_other} non-drifted)")

    # Load model
    model, tokenizer = load_model(args.model, device=args.device)

    # Resolve "auto" to a concrete device for tensor placement in analyze_single_query.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if args.device != "auto":
        device = args.device

    # Run analysis
    summary = run_analysis(model, tokenizer, samples, args.output, device=device)

    logger.info(f"\nDone! Results saved to {args.output}")
    logger.info(f"Next: python plot_drift_signals.py --input {args.output}")
530
+
531
+
532
# Standard script entry point.
if __name__ == "__main__":
    main()
analyze_single.py ADDED
@@ -0,0 +1,1235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ analyze_single.py — Per-Model Disentanglement Analysis
4
+ =======================================================
5
+ Runs on cached hidden states only. Never touches the model.
6
+ Produces all per-model results, figures, and probe bundles.
7
+
8
+ Experiments:
9
+ [CORE] L1-ISTA probing: drift / uncertainty / correctness (true sparsity)
10
+ [IDEA-1] Drift vs Correctness dissociation — Cell B key result
11
+ [NEW-A] Null-space projection: project out uncertainty, re-probe drift
12
+ [NEW-B] Probe direction stability: L×L cosine matrix
13
+ [NEW-C] Logit lens: per-layer P(expected token) decay curve
14
+ [NEW-E] Temporal distance: drift score vs months since cutoff
15
+ [NEW-F] Calibration: reliability diagrams
16
+ [PERM] Permutation test at best layer
17
+ [SPARSE] Lambda-AUROC-neurons sparsity tradeoff
18
+ [REL] Per-relation breakdown
19
+
20
+ Outputs:
21
+ final_results.json Complete results summary
22
+ all_layer_results.json Per-layer JSON (resumable)
23
+ probe_bundle_{model}.npz Weight vectors + norms for cross-model
24
+ per_layer/layer_XX.json Individual layer results
25
+ figures/fig1..fig11.png Publication figures
26
+
27
+ Usage:
28
+ python analyze_single.py --model qwen25
29
+ python analyze_single.py --model llama31 --layers 20 21 22 23 24 25 26 27
30
+ python analyze_single.py --model qwen25 --skip_permutation --skip_logit_lens
31
+ """
32
+
33
+ import argparse
34
+ import json
35
+ import logging
36
+ import time
37
+ import warnings
38
+ from datetime import datetime
39
+ from pathlib import Path
40
+
41
+ import numpy as np
42
+ import torch
43
+ import yaml
44
+
45
+ warnings.filterwarnings("ignore")
46
+ logging.basicConfig(
47
+ level=logging.INFO,
48
+ format="%(asctime)s [%(levelname)s] %(message)s",
49
+ handlers=[logging.StreamHandler()])
50
+ logger = logging.getLogger(__name__)
51
+
52
+
53
+ # ─────────────────────────────────────────────────────────────────────────────
54
+ # CONFIG
55
+ # ─────────────────────────────────────────────────────────────────────────────
56
+
57
def load_config(config_path="models.yaml"):
    """Read and parse the experiment configuration.

    Parameters
    ----------
    config_path : str
        Path to the YAML config file (default: ``models.yaml``).

    Returns
    -------
    The deserialized YAML document (typically a dict).
    """
    with open(config_path) as fh:
        cfg = yaml.safe_load(fh)
    return cfg
60
+
61
+
62
+ # ─────────────────────────────────────────────────────────────────────────────
63
+ # L1-ISTA PROBE (true sparsity)
64
+ # ─────────────────────────────────────────────────────────────────────────────
65
+
66
def soft_threshold(w, lam):
    """Element-wise soft-thresholding: shrink each |w_i| by lam toward zero.

    This is the proximal operator of the L1 norm used by the ISTA solver.
    """
    shrunk_magnitude = torch.clamp(torch.abs(w) - lam, min=0.0)
    return torch.sign(w) * shrunk_magnitude
68
+
69
+
70
class L1ProbeGPU:
    """L1-regularised logistic regression via ISTA on GPU.

    Solves min_w BCE(sigmoid(Xw + b), y) + lam * ||w||_1 with proximal
    gradient descent (ISTA) plus a simple backtracking line search.
    Exposes a minimal sklearn-like surface (``coef_``) for downstream code.
    """

    def __init__(self, dim, lam=1e-3, max_iter=2000, tol=1e-6, device="cuda"):
        # lam: L1 penalty strength; tol: max |Δw| convergence threshold.
        self.lam = lam
        self.max_iter = max_iter
        self.tol = tol
        self.device = device
        self.w = torch.zeros(dim, device=device)
        self.b = torch.zeros(1, device=device)
        self.coef_ = None  # populated by fit(); list-of-one like sklearn

    def _loss_grad(self, w, b, X, y):
        # Returns (BCE loss, grad wrt w, grad wrt b). Logits are clamped to
        # [-30, 30] and probabilities floored at 1e-12 for numeric safety.
        z = torch.clamp(X @ w + b, -30, 30)
        p = torch.sigmoid(z)
        L = -((y * torch.log(p + 1e-12)) +
              (1 - y) * torch.log(1 - p + 1e-12)).mean()
        e = p - y
        return L, (X.T @ e) / len(y), e.mean().unsqueeze(0)

    def fit(self, X, y):
        """Run ISTA until convergence (|Δw|_max < tol) or max_iter sweeps."""
        w = torch.zeros(X.shape[1], device=self.device)
        b = torch.zeros(1, device=self.device)
        lr = 1.0
        for _ in range(self.max_iter):
            L, gw, gb = self._loss_grad(w, b, X, y)
            # Backtracking: halve lr (up to 30 times) until the proximal
            # step does not increase the smooth loss (small slack 1e-4).
            for _ in range(30):
                wt = soft_threshold(w - lr * gw, lr * self.lam)
                bt = b - lr * gb
                Lt, _, _ = self._loss_grad(wt, bt, X, y)
                if Lt <= L + 1e-4:
                    break
                lr *= 0.5
            # Gently re-grow the step size after a successful step.
            lr = min(lr * 1.05, 10.0)
            if (wt - w).abs().max().item() < self.tol:
                w, b = wt, bt
                break
            w, b = wt, bt
        self.w, self.b = w, b
        self.coef_ = [w.cpu().numpy()]
        return self

    @torch.no_grad()
    def predict_proba_t(self, X):
        # Returns an (n, 2) numpy array [P(y=0), P(y=1)] like sklearn.
        p = torch.sigmoid(torch.clamp(X @ self.w + self.b, -30, 30))
        p = p.cpu().numpy().ravel()
        return np.column_stack([1 - p, p])
117
+
118
+
119
class ProbeWrapper:
    """Wraps L1ProbeGPU with preprocessing and sklearn-like interface."""

    def __init__(self, probe, mean, std, device):
        self._p = probe
        self._mean = mean
        self._std = std
        self._dev = device
        self.coef_ = probe.coef_

    def _to_gpu(self, X_np):
        # Sanitize (NaN/inf -> finite), clip to a safe range, standardize,
        # then move onto the probe's device as float32.
        cleaned = np.nan_to_num(X_np.astype(np.float32),
                                nan=0., posinf=1e4, neginf=-1e4)
        cleaned = np.clip(cleaned, -1e4, 1e4)
        standardized = (cleaned - self._mean) / self._std
        return torch.tensor(standardized, dtype=torch.float32,
                            device=self._dev)

    def predict_proba(self, X_np):
        """Return (n, 2) class probabilities for raw (unnormalized) input."""
        return self._p.predict_proba_t(self._to_gpu(X_np))

    @property
    def w_np(self):
        """Probe weight vector as a numpy array on CPU."""
        return self._p.w.cpu().numpy()

    @property
    def n_active(self):
        """Number of non-zero weights (true L1 sparsity)."""
        return int(np.count_nonzero(self.w_np))

    @property
    def norm_stats(self):
        """Standardization statistics used at fit time."""
        return {"mean": self._mean, "std": self._std}
149
+
150
+
151
def _preprocess(X_np):
    """Sanitize a feature matrix and compute standardization statistics.

    Replaces NaN/inf with finite values, clips to [-1e4, 1e4], and returns
    ``(X, mean, std)`` where mean/std are per-column with keepdims so they
    broadcast against ``X``. std has a 1e-8 floor to avoid division by zero.
    """
    X = np.clip(
        np.nan_to_num(X_np.astype(np.float32), nan=0., posinf=1e4, neginf=-1e4),
        -1e4, 1e4)
    mu = X.mean(axis=0, keepdims=True)
    sigma = X.std(axis=0, keepdims=True) + 1e-8
    return X, mu, sigma
157
+
158
+
159
def fit_probe(X_np, y_np, lam, device, max_iter=2000):
    """Fit one L1 probe at a fixed lambda and wrap it for prediction.

    Standardizes the features, runs the ISTA solver on ``device``, and
    returns a ``ProbeWrapper`` carrying the fit-time normalization stats.
    """
    X, mu, sigma = _preprocess(X_np)
    X_t = torch.tensor((X - mu) / sigma, dtype=torch.float32, device=device)
    y_t = torch.tensor(y_np.astype(np.float32), device=device)
    solver = L1ProbeGPU(X_t.shape[1], lam=lam, max_iter=max_iter, device=device)
    solver.fit(X_t, y_t)
    return ProbeWrapper(solver, mu, sigma, device)
166
+
167
+
168
def cv_auroc(X_np, y_np, lam, device, max_iter=500, n_splits=3):
    """Stratified k-fold cross-validated AUROC for one L1 strength.

    The fold count is capped by the minority-class size; returns 0.5 when
    fewer than two folds are possible or no fold yields a valid score.
    """
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import roc_auc_score

    minority = min(int(y_np.sum()), int((1 - y_np).sum()))
    n_folds = min(n_splits, minority)
    if n_folds < 2:
        return 0.5

    splitter = StratifiedKFold(n_folds, shuffle=True, random_state=42)
    fold_scores = []
    for train_idx, val_idx in splitter.split(X_np, y_np):
        wrapper = fit_probe(X_np[train_idx], y_np[train_idx], lam,
                            device, max_iter)
        preds = wrapper.predict_proba(X_np[val_idx])[:, 1]
        # AUROC is undefined on single-class validation folds; skip them.
        if len(np.unique(y_np[val_idx])) > 1:
            fold_scores.append(roc_auc_score(y_np[val_idx], preds))
    return float(np.mean(fold_scores)) if fold_scores else 0.5
182
+
183
+
184
def best_probe(X_np, y_np, device, lambda_grid, max_iter=2000, cv_max_iter=500):
    """Select lambda by CV AUROC, then refit on all data at the winner.

    Returns ``(ProbeWrapper, best_cv_auroc, best_lambda)``.
    """
    best_auroc = 0.0
    best_lam = lambda_grid[0]
    for lam in lambda_grid:
        auroc = cv_auroc(X_np, y_np, lam, device, max_iter=cv_max_iter)
        if auroc > best_auroc:
            best_auroc, best_lam = auroc, lam
    final = fit_probe(X_np, y_np, best_lam, device, max_iter)
    return final, best_auroc, best_lam
191
+
192
+
193
+ # ─────────────────────────────────────────────────────────────────────────────
194
+ # UTILITIES
195
+ # ─────────────────────────────────────────────────────────────────────────────
196
+
197
def cosine(w1, w2):
    """Cosine similarity between two vectors (0-safe via a tiny denominator)."""
    denom = np.linalg.norm(w1) * np.linalg.norm(w2) + 1e-12
    return float(np.dot(w1, w2) / denom)
200
+
201
+
202
def jaccard(w1, w2):
    """Jaccard overlap of the non-zero supports of two weight vectors.

    Returns |S1 ∩ S2| / |S1 ∪ S2| where S_i is the index set of non-zero
    entries; 0.0 when both vectors are entirely zero.
    """
    support1 = set(np.where(w1 != 0)[0].tolist())
    support2 = set(np.where(w2 != 0)[0].tolist())
    union_size = len(support1 | support2)
    if union_size == 0:
        return 0.0
    return len(support1 & support2) / union_size
207
+
208
+
209
+ # ─────────────────────────────────────────────────────────────────────────────
210
+ # PER-LAYER ANALYSIS
211
+ # ─────────────────────────────────────────────────────────────────────────────
212
+
213
def analyze_layer(layer, results, device, lambda_grid, max_iter=2000,
                  cv_max_iter=500):
    """Run the full probing suite on one layer's cached hidden states.

    Fits three L1 probes (drift, uncertainty, correctness), measures their
    geometric overlap (cosine / support Jaccard), re-probes drift in the
    null space of the uncertainty direction [NEW-A], scores the 2x2
    confidence-by-drift cells [IDEA-1], and evaluates both probes per
    relation.

    Parameters
    ----------
    layer : hashable
        Key into each sample's ``hidden_states`` mapping.
    results : list[dict]
        Cached samples; each needs ``hidden_states``, ``is_drifted``,
        ``top_prob``, and optionally ``correct`` / ``relation``.
    device : str
        Torch device used for probe fitting.
    lambda_grid : list[float]
        L1 strengths searched by ``best_probe``.
    max_iter, cv_max_iter : int
        ISTA iterations for the final fit and CV folds respectively.

    Returns
    -------
    dict of per-layer metrics. Keys prefixed with ``_`` hold numpy arrays
    and live probe objects kept only in memory (stripped before JSON save).
    """
    from sklearn.metrics import roc_auc_score
    t0 = time.time()

    drifted = [r for r in results if r["is_drifted"]]
    non_drifted = [r for r in results if not r["is_drifted"]]

    X_all = np.array([r["hidden_states"][layer] for r in results])
    y_drift = np.array([int(r["is_drifted"]) for r in results])
    y_corr = np.array([int(r.get("correct", False)) for r in results])

    # 1. Drift probe: drifted vs stable, fit on all samples.
    dp, dp_au, dp_lam = best_probe(X_all, y_drift, device, lambda_grid,
                                   max_iter, cv_max_iter)
    w_d = dp.w_np

    # 2. Uncertainty probe, fit on non-drifted samples only so it cannot
    #    absorb the drift signal; label = top_prob below the median.
    X_nd = np.array([r["hidden_states"][layer] for r in non_drifted])
    ct = float(np.median([r["top_prob"] for r in non_drifted]))
    y_unc = np.array([int(r["top_prob"] < ct) for r in non_drifted])
    if y_unc.sum() < 5 or (len(y_unc) - y_unc.sum()) < 5:
        # Median split too imbalanced — fall back to a fixed 0.5 threshold.
        ct = 0.5
        y_unc = np.array([int(r["top_prob"] < ct) for r in non_drifted])
    up, up_au, up_lam = best_probe(X_nd, y_unc, device, lambda_grid,
                                   max_iter, cv_max_iter)
    w_u = up.w_np

    # 3. Correctness probe — only fit when both classes have >= 10 samples;
    #    otherwise keep cp=None with a chance-level AUROC and zero weights.
    cp, cp_au, cp_lam = None, 0.5, 0.0
    w_c = np.zeros_like(w_d)
    nc, nw = int(y_corr.sum()), int((1 - y_corr).sum())
    if nc >= 10 and nw >= 10:
        cp, cp_au, cp_lam = best_probe(X_all, y_corr, device, lambda_grid,
                                       max_iter, cv_max_iter)
        w_c = cp.w_np

    # 4. Pairwise direction overlap: cosines + support Jaccard.
    cos_du = cosine(w_d, w_u)
    cos_dc = cosine(w_d, w_c)
    cos_uc = cosine(w_u, w_c)
    jac_du = jaccard(w_d, w_u)
    jac_dc = jaccard(w_d, w_c)

    # 5. [NEW-A] Null-space projection: remove the uncertainty direction
    #    from every sample, then re-probe drift. A high AUROC here means
    #    drift is not merely uncertainty in disguise.
    ns_au = 0.5
    wu_n = np.linalg.norm(w_u)
    if wu_n > 1e-8:
        u_hat = w_u / wu_n
        X_perp = X_all - (X_all @ u_hat)[:, None] * u_hat[None, :]
        _, ns_au, _ = best_probe(X_perp, y_drift, device, lambda_grid,
                                 max_iter, cv_max_iter)

    # 6. [IDEA-1] 2x2 cells: confidence (top_prob vs ct) x drift status.
    #    Each cell reports the mean/std/flag-rate of all three probes.
    cells = {}
    for cname, samps in [
        ("A_confident_stable", [r for r in non_drifted if r["top_prob"] >= ct]),
        ("B_confident_drifted", [r for r in drifted if r["top_prob"] >= ct]),
        ("C_uncertain_stable", [r for r in non_drifted if r["top_prob"] < ct]),
        ("D_uncertain_drifted", [r for r in drifted if r["top_prob"] < ct]),
    ]:
        if not samps:
            cells[cname] = {"n": 0}
            continue
        Xc = np.array([r["hidden_states"][layer] for r in samps])
        p_d = dp.predict_proba(Xc)[:, 1]
        p_c = cp.predict_proba(Xc)[:, 1] if cp else np.full(len(samps), 0.5)
        p_u = up.predict_proba(Xc)[:, 1]
        cells[cname] = {
            "n": len(samps),
            "drift_mean": float(np.mean(p_d)),
            "drift_std": float(np.std(p_d)),
            "correct_mean": float(np.mean(p_c)),
            "correct_std": float(np.std(p_c)),
            "uncertainty_mean": float(np.mean(p_u)),
            "uncertainty_std": float(np.std(p_u)),
            "drift_flag_rate": float(np.mean(p_d > 0.5)),
            "correct_flag_rate": float(np.mean(p_c > 0.5)),
        }

    # 7. Per-relation AUROC of the already-fitted probes (skip relations
    #    with fewer than 5 drifted or 5 stable samples).
    per_rel = {}
    for rel in sorted(set(r.get("relation", "unknown") for r in results)):
        rs = [r for r in results if r.get("relation", "unknown") == rel]
        nd_ = sum(1 for r in rs if r["is_drifted"])
        ns_ = len(rs) - nd_
        if nd_ < 5 or ns_ < 5:
            continue
        Xr = np.array([r["hidden_states"][layer] for r in rs])
        ydr = np.array([int(r["is_drifted"]) for r in rs])
        ycr = np.array([int(r.get("correct", False)) for r in rs])
        try:
            au_d = roc_auc_score(ydr, dp.predict_proba(Xr)[:, 1])
        except Exception:
            au_d = 0.5
        try:
            au_c = (roc_auc_score(ycr, cp.predict_proba(Xr)[:, 1])
                    if cp else 0.5)
        except Exception:
            au_c = 0.5
        per_rel[rel] = {"drift_auroc": au_d, "correct_auroc": au_c,
                        "n_drifted": nd_, "n_stable": ns_}

    return {
        "layer": layer,
        "drift_auroc": dp_au,
        "uncertainty_auroc": up_au,
        "correctness_auroc": cp_au,
        "drift_lam": dp_lam,
        "cos_du": cos_du,
        "cos_dc": cos_dc,
        "cos_uc": cos_uc,
        "jaccard_du": jac_du,
        "jaccard_dc": jac_dc,
        "n_active_drift": dp.n_active,
        "n_active_unc": up.n_active,
        "n_active_corr": int(np.sum(w_c != 0)),
        "null_space_drift_auroc": ns_au,
        "conf_threshold": ct,
        "cells": cells,
        "per_relation": per_rel,
        "elapsed_s": time.time() - t0,
        # Kept in memory for figures/bundle — stripped before JSON save
        "_w_drift": w_d, "_w_unc": w_u, "_w_corr": w_c,
        "_probes": {"drift": dp, "uncertainty": up, "correctness": cp},
    }
339
+
340
+
341
+ # ─────────────────────────────────────────────────────────────────────────────
342
+ # [NEW-B] PROBE DIRECTION STABILITY
343
+ # ─────────────────────────────────────────────────────────────────────────────
344
+
345
def probe_direction_stability(all_lr):
    """[NEW-B] L×L cosine matrix between drift-probe directions.

    Only layers whose results retain the in-memory ``_w_drift`` vector
    participate. Returns ``(layers, matrix)`` with ones on the diagonal.
    """
    layers = sorted(k for k in all_lr if "_w_drift" in all_lr[k])
    n = len(layers)
    mat = np.eye(n)
    for i, layer_i in enumerate(layers):
        for j, layer_j in enumerate(layers):
            if i == j:
                continue
            mat[i, j] = cosine(all_lr[layer_i]["_w_drift"],
                               all_lr[layer_j]["_w_drift"])
    return layers, mat
355
+
356
+
357
+ # ─────────────────────────────────────────────────────────────────────────────
358
+ # [NEW-C] LOGIT LENS
359
+ # ─────────────────────────────────────────────────────────────────────────────
360
+
361
def logit_lens_analysis(results, model_dir, model_key, device):
    """[NEW-C] Logit lens: mean top-1 token probability per layer, by drift.

    Loads the exported LM head + final-norm weights from
    ``{model_dir}/lm_head_{model_key}.npz`` and decodes every cached hidden
    state at every layer. Returns ``{"layers": [...], "drifted": [...],
    "stable": [...]}`` or ``None`` when the npz file is missing.

    NOTE(review): despite the "P(expected token)" labelling elsewhere, this
    computes the probability of the argmax token (``p.max()``), not of a
    specific expected token — confirm that is the intended metric.
    NOTE(review): the normalization (h - mean) / std * ln_w + ln_b is
    LayerNorm-style; confirm it matches the target model's final norm
    (RMSNorm models subtract no mean and have no bias).
    """
    lm_path = Path(model_dir) / f"lm_head_{model_key}.npz"
    if not lm_path.exists():
        logger.warning(f"lm_head not found: {lm_path}")
        return None

    lm = np.load(str(lm_path), allow_pickle=True)
    lm_w = torch.tensor(lm["lm_head"], dtype=torch.float32, device=device)
    ln_w = torch.tensor(lm["ln_weight"], dtype=torch.float32, device=device)
    ln_b = torch.tensor(lm["ln_bias"], dtype=torch.float32, device=device)

    layers = sorted(results[0]["hidden_states"].keys())
    drifted = [r for r in results if r["is_drifted"]]
    stable = [r for r in results if not r["is_drifted"]]

    def layer_prob(samps, layer):
        # Mean over samples of the top-1 softmax probability after pushing
        # the layer's hidden state through the (normed) LM head.
        probs = []
        for r in samps:
            h = torch.tensor(r["hidden_states"][layer],
                             dtype=torch.float32, device=device)
            h_n = (h - h.mean()) / (h.std() + 1e-5) * ln_w + ln_b
            # Clamp logits before softmax for numeric stability.
            lgts = torch.clamp(lm_w @ h_n, -60, 60)
            p = torch.softmax(lgts, dim=-1)
            probs.append(float(p.max().item()))
        return float(np.mean(probs)) if probs else 0.0

    data = {"layers": layers, "drifted": [], "stable": []}
    for l in layers:
        data["drifted"].append(layer_prob(drifted, l))
        data["stable"].append(layer_prob(stable, l))
        if (l + 1) % 7 == 0:  # periodic progress log every 7 layers
            logger.info(f"  logit lens L{l}: "
                        f"drifted={data['drifted'][-1]:.4f} "
                        f"stable={data['stable'][-1]:.4f}")
    return data
396
+
397
+
398
+ # ─────────────────────────────────────────────────────────────────────────────
399
+ # [NEW-E] TEMPORAL DISTANCE
400
+ # ─────────────────────────────────────────────────────────────────────────────
401
+
402
def temporal_distance_analysis(results, cutoff_months):
    """[NEW-E] Correlate drift score with months relative to the cutoff.

    Parses each drifted sample's ``drift_date`` (ISO-like "YYYY-MM...")
    into a signed month distance anchored at 2020-00, keeps distances in
    [-60, 60], and returns the Pearson correlation with the cached
    ``_drift_score`` (or ``None`` when fewer than 20 usable samples).
    """

    def _month_distance(raw_date):
        # "YYYY-MM..." with an optional time part -> signed month offset.
        parts = str(raw_date).split("T")[0].split("-")
        year = int(parts[0])
        month = int(parts[1]) if len(parts) > 1 else 6
        return (year - 2020) * 12 + month - cutoff_months

    bins, scores = [], []
    for rec in results:
        if not rec["is_drifted"]:
            continue
        raw = rec.get("drift_date")
        if raw in (None, "", "None"):
            continue
        try:
            dist = _month_distance(raw)
        except Exception:
            continue
        if not (-60 <= dist <= 60):
            continue
        bins.append(dist)
        scores.append(rec.get("_drift_score", 0.5))

    if len(bins) < 20:
        return None
    bins, scores = np.array(bins), np.array(scores)
    corr = float(np.corrcoef(bins, scores)[0, 1])
    logger.info(f"  Temporal corr: {corr:.4f} ({len(bins)} samples)")
    return {"bins": bins.tolist(), "scores": scores.tolist(),
            "correlation": corr}
429
+
430
+
431
+ # ─────────────────────────────────────────────────────────────────────────────
432
+ # PERMUTATION TEST
433
+ # ─────────────────────────────────────────────────────────────────────────────
434
+
435
def permutation_test(results, best_layer, n_perms, device, lambda_grid):
    """Permutation test for the drift probe at the chosen layer.

    Compares the cross-validated AUROC under the true labels against a null
    distribution of AUROCs from probes refit on shuffled labels; the
    p-value is the fraction of null AUROCs >= the true AUROC.

    NOTE(review): the true score is cross-validated while each null score
    is in-sample (fit and scored on the same shuffled data), which inflates
    the null and makes the p-value conservative — confirm this asymmetry
    is intentional.
    """
    from sklearn.metrics import roc_auc_score
    logger.info(f"Permutation test ({n_perms}) at layer {best_layer}")
    X = np.array([r["hidden_states"][best_layer] for r in results])
    y = np.array([int(r["is_drifted"]) for r in results])
    _, true_au, _ = best_probe(X, y, device, lambda_grid, cv_max_iter=300)
    logger.info(f"  True AUROC: {true_au:.4f}")

    nulls = []
    for i in range(n_perms):
        y_perm = np.random.permutation(y)
        # Fixed lambda + fewer iterations: null fits only need to be cheap.
        pw = fit_probe(X, y_perm, 1e-3, device, max_iter=300)
        p_n = pw.predict_proba(X)[:, 1]
        try:
            nulls.append(roc_auc_score(y_perm, p_n))
        except Exception:
            nulls.append(0.5)  # degenerate shuffle (single class)
        if (i + 1) % 250 == 0:
            logger.info(f"  {i+1}/{n_perms} null_mean={np.mean(nulls):.4f}")

    nulls = np.array(nulls)
    p_val = float(np.mean(nulls >= true_au))
    logger.info(f"  p={p_val:.6f} null_mean={nulls.mean():.4f}")
    return {"true_auroc": float(true_au), "null_mean": float(nulls.mean()),
            "null_std": float(nulls.std()), "p_value": p_val, "n": n_perms}
460
+
461
+
462
+ # ─────────────────────────────────────────────────────────────────────────────
463
+ # SPARSITY CURVE
464
+ # ─────────────────────────────────────────────────────────────────────────────
465
+
466
def sparsity_curve(results, best_layer, device, sparsity_lambdas):
    """[SPARSE] Lambda -> (active neurons, CV AUROC) tradeoff at one layer.

    For each lambda, the active-neuron count comes from a full-data fit
    while the AUROC comes from cross-validation. Returns a list of
    ``{"lambda", "n_active", "auroc"}`` dicts in grid order.
    """
    logger.info(f"Sparsity curve at layer {best_layer}")
    X = np.array([r["hidden_states"][best_layer] for r in results])
    y = np.array([int(r["is_drifted"]) for r in results])
    out = []
    for lam in sparsity_lambdas:
        pw = fit_probe(X, y, lam, device, max_iter=500)
        au = cv_auroc(X, y, lam, device, max_iter=300)
        out.append({"lambda": lam, "n_active": pw.n_active, "auroc": float(au)})
        logger.info(f"  lam={lam:.2e} active={pw.n_active:>5d} AUROC={au:.4f}")
    return out
477
+
478
+
479
+ # ─────────────────────────────────────────────────────────────────────────────
480
+ # PROBE BUNDLE EXPORT (for cross-model analysis)
481
+ # ─────────────────────────────────────────────────────────────────────────────
482
+
483
def export_probe_bundle(all_lr, best_layer, results, model_key, out_dir):
    """Export probe weight vectors + normalization stats for cross_model.py.

    Writes ``probe_bundle_{model_key}.npz`` into ``out_dir`` containing the
    three probe directions at the best layer, the fit-time normalization
    statistics, and summary metrics needed by the cross-model analysis.

    Parameters
    ----------
    all_lr : dict
        Per-layer results from ``analyze_layer`` (must still carry the
        in-memory ``_w_drift`` / ``_w_unc`` / ``_w_corr`` arrays).
    best_layer : hashable
        Layer whose probes are exported.
    results : list[dict]
        Cached samples, used to recompute normalization stats.
    model_key : str
        Short model identifier used in the output filename.
    out_dir : str or Path
        Destination directory.
    """
    bl = all_lr[best_layer]
    X = np.array([r["hidden_states"][best_layer] for r in results])
    _, m, s = _preprocess(X)

    bundle = {
        "model_key": model_key,
        "best_layer": best_layer,
        "hidden_dim": X.shape[1],
        "n_samples": len(results),
        "w_drift": bl["_w_drift"],
        "w_unc": bl["_w_unc"],
        "w_corr": bl["_w_corr"],
        "norm_mean": m,
        "norm_std": s,
        "drift_auroc": bl["drift_auroc"],
        "n_active_drift": bl["n_active_drift"],
        "cos_du": bl["cos_du"],
        "cos_dc": bl["cos_dc"],
    }
    path = Path(out_dir) / f"probe_bundle_{model_key}.npz"
    # Fix: pass the dict directly — the previous `**{k: v for k, v in
    # bundle.items()}` built a pointless identical copy before unpacking.
    np.savez_compressed(str(path), **bundle)
    logger.info(f"  Probe bundle: {path}")
507
+
508
+
509
+ # ─────────────────────────────────────────────────────────────────────────────
510
+ # FIGURES (same as v3 — consolidated)
511
+ # ─────────────────────────────────────────────────────────────────────────────
512
+
513
+ def save_figures(all_lr, model_dir, model_key, results,
514
+ stability_data=None, lens_data=None,
515
+ sparsity_data=None, temporal_data=None):
516
+ import matplotlib
517
+ matplotlib.use("Agg")
518
+ import matplotlib.pyplot as plt
519
+
520
+ fig_dir = Path(model_dir) / "figures"
521
+ fig_dir.mkdir(parents=True, exist_ok=True)
522
+
523
+ layers = sorted(k for k in all_lr if "drift_auroc" in all_lr[k])
524
+ if len(layers) < 2:
525
+ return
526
+ best = max(layers, key=lambda l: all_lr[l]["drift_auroc"])
527
+
528
+ P = {"drift": "#e74c3c", "unc": "#3498db", "corr": "#2ecc71",
529
+ "null": "#9b59b6", "neu": "#e67e22"}
530
+
531
+ # ── Fig 1: 6-panel dashboard ──────────────────────────────────────────
532
+ fig, axes = plt.subplots(2, 3, figsize=(22, 12))
533
+ fig.suptitle(f"[{model_key}] Disentanglement Dashboard", fontsize=16,
534
+ fontweight="bold")
535
+
536
+ ax = axes[0, 0]
537
+ for key, lbl, col, ls in [
538
+ ("drift_auroc", "Drift", P["drift"], "-"),
539
+ ("uncertainty_auroc", "Uncertainty", P["unc"], "--"),
540
+ ("correctness_auroc", "Correctness", P["corr"], "-."),
541
+ ("null_space_drift_auroc", "Drift (null-space)", P["null"], ":"),
542
+ ]:
543
+ ax.plot(layers, [all_lr[l].get(key, np.nan) for l in layers],
544
+ f"o{ls}", color=col, lw=2, ms=5, label=lbl)
545
+ ax.axvline(best, color="gray", ls="--", alpha=0.4)
546
+ ax.set(xlabel="Layer", ylabel="AUROC", title="Probe AUROC by Layer",
547
+ ylim=(0.4, 1.05))
548
+ ax.legend(fontsize=9)
549
+ ax.grid(alpha=0.3)
550
+
551
+ ax = axes[0, 1]
552
+ for key, lbl, col in [
553
+ ("cos_du", "|cos(drift, unc)|", P["drift"]),
554
+ ("cos_dc", "|cos(drift, corr)|", P["corr"]),
555
+ ("cos_uc", "|cos(unc, corr)|", P["unc"]),
556
+ ]:
557
+ ax.plot(layers, [abs(all_lr[l].get(key, 0)) for l in layers],
558
+ "o-", color=col, lw=2, ms=5, label=lbl)
559
+ ax.axhline(0.3, color="gray", ls="--", alpha=0.4, label="0.3 threshold")
560
+ ax.set(xlabel="Layer", ylabel="|Cosine Similarity|",
561
+ title="3-Way Disentanglement", ylim=(0, 1.0))
562
+ ax.legend(fontsize=9)
563
+ ax.grid(alpha=0.3)
564
+
565
+ ax = axes[0, 2]
566
+ ax.plot(layers, [all_lr[l].get("n_active_drift", 0) for l in layers],
567
+ "o-", color=P["drift"], lw=2, ms=5, label="Drift")
568
+ ax.plot(layers, [all_lr[l].get("n_active_unc", 0) for l in layers],
569
+ "s-", color=P["unc"], lw=2, ms=5, label="Unc")
570
+ ax.plot(layers, [all_lr[l].get("n_active_corr", 0) for l in layers],
571
+ "^-", color=P["corr"], lw=2, ms=5, label="Corr")
572
+ ax2 = ax.twinx()
573
+ ax2.plot(layers, [all_lr[l].get("jaccard_du", 0) for l in layers],
574
+ "D--", color=P["null"], lw=2, ms=4, label="J(d,u)")
575
+ ax2.plot(layers, [all_lr[l].get("jaccard_dc", 0) for l in layers],
576
+ "P--", color=P["neu"], lw=2, ms=4, label="J(d,c)")
577
+ ax.set(xlabel="Layer", ylabel="Active neurons",
578
+ title="True Sparsity & Jaccard")
579
+ ax2.set_ylabel("Jaccard")
580
+ ax.legend(loc="upper left", fontsize=8)
581
+ ax2.legend(loc="upper right", fontsize=8)
582
+ ax.grid(alpha=0.3)
583
+
584
+ ax = axes[1, 0]
585
+ d_au = [all_lr[l]["drift_auroc"] for l in layers]
586
+ ns_au = [all_lr[l].get("null_space_drift_auroc", np.nan) for l in layers]
587
+ ax.plot(layers, d_au, "o-", color=P["drift"], lw=2.5, ms=7, label="Full")
588
+ ax.plot(layers, ns_au, "s--", color=P["null"], lw=2, ms=7,
589
+ label="Null-space of unc")
590
+ ax.fill_between(layers, ns_au, d_au, alpha=0.15, color=P["null"])
591
+ ax.set(xlabel="Layer", ylabel="AUROC",
592
+ title="[NEW-A] Null-Space Projection", ylim=(0.4, 1.05))
593
+ ax.legend(fontsize=10)
594
+ ax.grid(alpha=0.3)
595
+
596
+ ax = axes[1, 1]
597
+ bl = best
598
+ cells = all_lr[bl].get("cells", {})
599
+ cnames = ["A_confident_stable", "B_confident_drifted",
600
+ "C_uncertain_stable", "D_uncertain_drifted"]
601
+ if cells and any(cells.get(c, {}).get("n", 0) > 0 for c in cnames):
602
+ ns_ = [cells.get(c, {}).get("n", 0) for c in cnames]
603
+ dm_ = [cells.get(c, {}).get("drift_mean", 0) for c in cnames]
604
+ cm_ = [cells.get(c, {}).get("correct_mean", 0) for c in cnames]
605
+ x = np.arange(4)
606
+ w = 0.35
607
+ ax.bar(x - w / 2, dm_, w, color=P["drift"], label="Drift",
608
+ edgecolor="black", lw=0.5)
609
+ ax.bar(x + w / 2, cm_, w, color=P["corr"], label="Correctness",
610
+ edgecolor="black", lw=0.5)
611
+ ax.set_xticks(x)
612
+ ax.set_xticklabels([f"{c[:3]}\nn={ns_[i]}" for i, c in enumerate(cnames)],
613
+ fontsize=9)
614
+ ax.axhline(0.5, color="gray", ls="--", alpha=0.5)
615
+ ax.set(ylabel="Mean score", title="[IDEA-1] Cell B Dissociation",
616
+ ylim=(0, 1.1))
617
+ ax.legend(fontsize=9)
618
+ ax.grid(alpha=0.3, axis="y")
619
+
620
+ ax = axes[1, 2]
621
+ if lens_data:
622
+ ls_ = lens_data["layers"]
623
+ ax.plot(ls_, lens_data["stable"], "o-", color=P["unc"], lw=2,
624
+ label="Stable")
625
+ ax.plot(ls_, lens_data["drifted"], "s--", color=P["drift"], lw=2,
626
+ label="Drifted")
627
+ ax.fill_between(ls_, lens_data["drifted"], lens_data["stable"],
628
+ alpha=0.2, color=P["drift"])
629
+ ax.axvline(best, color="gray", ls=":", lw=1.5)
630
+ ax.set(xlabel="Layer", ylabel="P(expected token)",
631
+ title="[NEW-C] Logit Lens")
632
+ ax.legend(fontsize=10)
633
+ ax.grid(alpha=0.3)
634
+ else:
635
+ ax.text(0.5, 0.5, "Logit lens\n(enable with flag)",
636
+ ha="center", va="center", transform=ax.transAxes)
637
+
638
+ plt.tight_layout()
639
+ plt.savefig(fig_dir / "fig1_dashboard.png", dpi=300, bbox_inches="tight")
640
+ plt.close()
641
+ logger.info(" fig1 saved")
642
+
643
+ # ── Fig 2: Idea 1 deep dive ──────────────────────────────────────────
644
+ if cells and any(cells.get(c, {}).get("n", 0) > 0 for c in cnames):
645
+ fig, axes2 = plt.subplots(1, 3, figsize=(21, 7))
646
+ fig.suptitle(f"[{model_key}] IDEA-1: Cell B Dissociation (Layer {best})",
647
+ fontsize=14, fontweight="bold")
648
+ clrs = [P["unc"], P["drift"], "#95a5a6", P["neu"]]
649
+ ns_ = [cells.get(c, {}).get("n", 0) for c in cnames]
650
+ xlbls = [f"Cell {chr(65+i)}\nn={ns_[i]}" for i in range(4)]
651
+ x = np.arange(4)
652
+ w = 0.6
653
+ for ax, key, ekey, title in [
654
+ (axes2[0], "drift_mean", "drift_std", "Drift Probe"),
655
+ (axes2[1], "correct_mean", "correct_std", "Correctness Probe"),
656
+ (axes2[2], "uncertainty_mean", "uncertainty_std", "Uncertainty Probe"),
657
+ ]:
658
+ vals = [cells.get(c, {}).get(key, 0) for c in cnames]
659
+ errs = [cells.get(c, {}).get(ekey, 0) for c in cnames]
660
+ ax.bar(x, vals, w, yerr=errs, capsize=7, color=clrs,
661
+ edgecolor="black", lw=0.7, alpha=0.85)
662
+ if vals[1] > 0:
663
+ ax.annotate("Cell B", xy=(1, vals[1] + errs[1]),
664
+ xytext=(1.6, vals[1] + errs[1] + 0.12),
665
+ arrowprops=dict(arrowstyle="->", color="red"),
666
+ fontsize=9, color="red", fontweight="bold")
667
+ ax.axhline(0.5, color="gray", ls="--", alpha=0.6)
668
+ ax.set_xticks(x)
669
+ ax.set_xticklabels(xlbls, fontsize=9)
670
+ ax.set(ylabel="Mean score", title=title, ylim=(0, 1.2))
671
+ ax.grid(alpha=0.3, axis="y")
672
+ plt.tight_layout()
673
+ plt.savefig(fig_dir / "fig2_idea1_dissociation.png",
674
+ dpi=300, bbox_inches="tight")
675
+ plt.close()
676
+ logger.info(" fig2 saved")
677
+
678
+ # ── Fig 3: 3×3 cosine heatmap ────────────────────────────────────────
679
+ bl = best
680
+ mat = np.array([
681
+ [1.0, all_lr[bl]["cos_du"], all_lr[bl]["cos_dc"]],
682
+ [all_lr[bl]["cos_du"], 1.0, all_lr[bl]["cos_uc"]],
683
+ [all_lr[bl]["cos_dc"], all_lr[bl]["cos_uc"], 1.0],
684
+ ])
685
+ fig, ax = plt.subplots(figsize=(8, 7))
686
+ im = ax.imshow(mat, cmap="RdBu_r", vmin=-1, vmax=1)
687
+ lbls = ["Drift", "Uncertainty", "Correctness"]
688
+ ax.set_xticks(range(3))
689
+ ax.set_yticks(range(3))
690
+ ax.set_xticklabels(lbls, fontsize=13)
691
+ ax.set_yticklabels(lbls, fontsize=13)
692
+ for i in range(3):
693
+ for j in range(3):
694
+ c = "white" if abs(mat[i, j]) > 0.5 else "black"
695
+ ax.text(j, i, f"{mat[i,j]:+.3f}", ha="center", va="center",
696
+ fontsize=14, fontweight="bold", color=c)
697
+ ax.set_title(f"[{model_key}] Cosine Matrix — Layer {bl}", fontsize=13)
698
+ plt.colorbar(im, ax=ax, shrink=0.8)
699
+ plt.tight_layout()
700
+ plt.savefig(fig_dir / "fig3_cosine_matrix.png", dpi=300, bbox_inches="tight")
701
+ plt.close()
702
+ logger.info(" fig3 saved")
703
+
704
+ # ── Fig 4: PCA ───────────────────────────────────────────────────────
705
+ if "_w_drift" in all_lr.get(best, {}):
706
+ from sklearn.decomposition import PCA
707
+ bl = best
708
+ X_b = np.array([r["hidden_states"][bl] for r in results])
709
+ is_d = np.array([r["is_drifted"] for r in results])
710
+ is_c = np.array([r.get("correct", False) for r in results])
711
+ pca = PCA(n_components=2)
712
+ X2 = pca.fit_transform(X_b)
713
+ Xsc = (X_b - X_b.mean(0)) / (X_b.std(0) + 1e-8)
714
+ w_d = all_lr[bl]["_w_drift"]
715
+ w_u = all_lr[bl]["_w_unc"]
716
+ w_c = all_lr[bl]["_w_corr"]
717
+ nd, nu, nc_ = (np.linalg.norm(w_d), np.linalg.norm(w_u),
718
+ np.linalg.norm(w_c))
719
+ pd_ = Xsc @ (w_d / (nd + 1e-8))
720
+ pu_ = Xsc @ (w_u / (nu + 1e-8))
721
+ pc_ = (Xsc @ (w_c / (nc_ + 1e-8)) if nc_ > 1e-8
722
+ else np.zeros(len(results)))
723
+
724
+ fig, axes3 = plt.subplots(1, 3, figsize=(22, 7))
725
+ fig.suptitle(f"[{model_key}] Geometry — Layer {bl}", fontsize=14,
726
+ fontweight="bold")
727
+ ax = axes3[0]
728
+ ax.scatter(X2[~is_d, 0], X2[~is_d, 1], c=P["unc"], alpha=0.3,
729
+ s=20, label="Stable", edgecolors="none")
730
+ ax.scatter(X2[is_d, 0], X2[is_d, 1], c=P["drift"], alpha=0.7,
731
+ s=50, label="Drifted", edgecolors="black", lw=0.3,
732
+ marker="*")
733
+ ax.set(xlabel=f"PC1 ({pca.explained_variance_ratio_[0]:.1%})",
734
+ ylabel=f"PC2 ({pca.explained_variance_ratio_[1]:.1%})",
735
+ title="PCA")
736
+ ax.legend()
737
+ ax.grid(alpha=0.2)
738
+
739
+ ax = axes3[1]
740
+ ax.scatter(pd_[~is_d], pu_[~is_d], c=P["unc"], alpha=0.3, s=20,
741
+ label="Stable", edgecolors="none")
742
+ ax.scatter(pd_[is_d], pu_[is_d], c=P["drift"], alpha=0.7, s=50,
743
+ label="Drifted", edgecolors="black", lw=0.3, marker="*")
744
+ ax.axhline(0, color="gray", ls="--", alpha=0.3)
745
+ ax.axvline(0, color="gray", ls="--", alpha=0.3)
746
+ ax.set(xlabel="Drift direction", ylabel="Uncertainty direction",
747
+ title=f"cos={all_lr[bl]['cos_du']:+.3f}")
748
+ ax.legend()
749
+ ax.grid(alpha=0.2)
750
+
751
+ ax = axes3[2]
752
+ ax.scatter(pd_[is_c], pc_[is_c], c=P["corr"], alpha=0.5, s=20,
753
+ label="Correct", edgecolors="none")
754
+ ax.scatter(pd_[~is_c], pc_[~is_c], c=P["drift"], alpha=0.5, s=20,
755
+ label="Incorrect", edgecolors="none")
756
+ ax.axhline(0, color="gray", ls="--", alpha=0.3)
757
+ ax.axvline(0, color="gray", ls="--", alpha=0.3)
758
+ ax.set(xlabel="Drift direction", ylabel="Correctness direction",
759
+ title=f"cos={all_lr[bl]['cos_dc']:+.3f}")
760
+ ax.legend()
761
+ ax.grid(alpha=0.2)
762
+ plt.tight_layout()
763
+ plt.savefig(fig_dir / "fig4_pca_projection.png",
764
+ dpi=300, bbox_inches="tight")
765
+ plt.close()
766
+ logger.info(" fig4 saved")
767
+
768
+ # ── Fig 5: Direction stability ───────────────────────────────────────
769
+ if stability_data:
770
+ sl, sm = stability_data
771
+ fig, axes4 = plt.subplots(1, 2, figsize=(18, 7))
772
+ fig.suptitle(f"[{model_key}] [NEW-B] Direction Stability", fontsize=14,
773
+ fontweight="bold")
774
+ ax = axes4[0]
775
+ im = ax.imshow(sm, cmap="RdBu_r", vmin=-1, vmax=1, aspect="auto")
776
+ step = max(1, len(sl) // 8)
777
+ tp = list(range(0, len(sl), step))
778
+ ax.set_xticks(tp)
779
+ ax.set_yticks(tp)
780
+ ax.set_xticklabels([sl[i] for i in tp])
781
+ ax.set_yticklabels([sl[i] for i in tp])
782
+ ax.set(xlabel="Layer", ylabel="Layer",
783
+ title="cos(w_drift_i, w_drift_j)")
784
+ plt.colorbar(im, ax=ax, shrink=0.8)
785
+
786
+ ax = axes4[1]
787
+ mean_c = [np.mean([sm[i, j] for j in range(len(sl)) if j != i])
788
+ for i in range(len(sl))]
789
+ d_au_s = [all_lr[l]["drift_auroc"] for l in sl]
790
+ ax.plot(sl, mean_c, "o-", color=P["drift"], lw=2, ms=6,
791
+ label="Mean cross-layer cos")
792
+ ax3 = ax.twinx()
793
+ ax3.plot(sl, d_au_s, "s--", color="#7f8c8d", lw=2, ms=6,
794
+ label="Drift AUROC")
795
+ ax.set(xlabel="Layer", ylabel="Mean cosine",
796
+ title="Stability vs AUROC")
797
+ ax3.set_ylabel("AUROC")
798
+ ax.legend(loc="lower left", fontsize=9)
799
+ ax3.legend(loc="lower right", fontsize=9)
800
+ ax.grid(alpha=0.3)
801
+ plt.tight_layout()
802
+ plt.savefig(fig_dir / "fig5_probe_stability.png",
803
+ dpi=300, bbox_inches="tight")
804
+ plt.close()
805
+ logger.info(" fig5 saved")
806
+
807
+ # ── Fig 6: Logit lens ────────────────────────────────────────────────
808
+ if lens_data:
809
+ fig, axes5 = plt.subplots(1, 2, figsize=(16, 6))
810
+ fig.suptitle(f"[{model_key}] [NEW-C] Logit Lens", fontsize=14,
811
+ fontweight="bold")
812
+ ls_ = lens_data["layers"]
813
+ axes5[0].plot(ls_, lens_data["stable"], "o-", color=P["unc"], lw=2.5,
814
+ ms=7, label="Stable")
815
+ axes5[0].plot(ls_, lens_data["drifted"], "s--", color=P["drift"],
816
+ lw=2.5, ms=7, label="Drifted")
817
+ axes5[0].fill_between(ls_, lens_data["drifted"], lens_data["stable"],
818
+ alpha=0.2, color=P["drift"])
819
+ axes5[0].axvline(best, color="gray", ls=":", lw=1.5)
820
+ axes5[0].set(xlabel="Layer", ylabel="P(expected token)",
821
+ title="Per-layer probability")
822
+ axes5[0].legend()
823
+ axes5[0].grid(alpha=0.3)
824
+
825
+ gap = [s - d for s, d in zip(lens_data["stable"], lens_data["drifted"])]
826
+ axes5[1].plot(ls_, gap, "o-", color=P["neu"], lw=2.5, ms=7)
827
+ axes5[1].fill_between(ls_, 0, gap, alpha=0.3, color=P["neu"])
828
+ axes5[1].axhline(0, color="gray", ls="--", alpha=0.5)
829
+ axes5[1].axvline(best, color=P["drift"], ls=":", lw=1.5)
830
+ axes5[1].set(xlabel="Layer", ylabel="Stable − Drifted gap",
831
+ title="Knowledge gap curve")
832
+ axes5[1].grid(alpha=0.3)
833
+ plt.tight_layout()
834
+ plt.savefig(fig_dir / "fig6_logit_lens.png",
835
+ dpi=300, bbox_inches="tight")
836
+ plt.close()
837
+ logger.info(" fig6 saved")
838
+
839
+ # ── Fig 7: Sparsity tradeoff ─────────────────────────────────────────
840
+ if sparsity_data:
841
+ fig, ax1 = plt.subplots(figsize=(10, 6))
842
+ lams = [d["lambda"] for d in sparsity_data]
843
+ nus = [d["n_active"] for d in sparsity_data]
844
+ aus = [d["auroc"] for d in sparsity_data]
845
+ ax1.semilogx(lams, aus, "o-", color=P["drift"], lw=2.5, ms=8,
846
+ label="AUROC")
847
+ ax4 = ax1.twinx()
848
+ ax4.semilogx(lams, nus, "s--", color=P["unc"], lw=2.5, ms=8,
849
+ label="Active neurons")
850
+ ax1.set(xlabel="L1 lambda", ylabel="AUROC",
851
+ title=f"[{model_key}] Sparsity Tradeoff")
852
+ ax4.set_ylabel("Active neurons")
853
+ ax1.legend(loc="lower left")
854
+ ax4.legend(loc="upper right")
855
+ ax1.grid(alpha=0.3, which="both")
856
+ plt.tight_layout()
857
+ plt.savefig(fig_dir / "fig7_sparsity_tradeoff.png",
858
+ dpi=300, bbox_inches="tight")
859
+ plt.close()
860
+ logger.info(" fig7 saved")
861
+
862
+ # ── Fig 8: Calibration ───────────────────────────────────────────────
863
+ bl = best
864
+ pr_objs = all_lr[bl].get("_probes", {})
865
+ X_b = np.array([r["hidden_states"][bl] for r in results])
866
+ is_d = np.array([int(r["is_drifted"]) for r in results])
867
+ is_c = np.array([int(r.get("correct", False)) for r in results])
868
+ fig, axes6 = plt.subplots(1, 3, figsize=(18, 6))
869
+ fig.suptitle(f"[{model_key}] Reliability Diagrams (Layer {bl})",
870
+ fontsize=14, fontweight="bold")
871
+ for ax, pkey, yt, lbl, col in [
872
+ (axes6[0], "drift", is_d, "Drift", P["drift"]),
873
+ (axes6[1], "correctness", is_c, "Correctness", P["corr"]),
874
+ ]:
875
+ probe = pr_objs.get(pkey)
876
+ if probe is None:
877
+ ax.set_visible(False)
878
+ continue
879
+ scores = probe.predict_proba(X_b)[:, 1]
880
+ bins_e = np.linspace(0, 1, 11)
881
+ mid = (bins_e[:-1] + bins_e[1:]) / 2
882
+ frac = [float(yt[(scores >= lo) & (scores < hi)].mean())
883
+ if ((scores >= lo) & (scores < hi)).sum() > 0 else np.nan
884
+ for lo, hi in zip(bins_e[:-1], bins_e[1:])]
885
+ ax.plot([0, 1], [0, 1], "k--", lw=1.5, label="Perfect")
886
+ ax.bar(mid, frac, 0.08, alpha=0.6, color=col, edgecolor="black",
887
+ lw=0.5, label=lbl)
888
+ ax.set(xlabel="Predicted prob", ylabel="Fraction positive",
889
+ title=lbl, xlim=(0, 1), ylim=(0, 1))
890
+ ax.legend()
891
+ ax.grid(alpha=0.3)
892
+
893
+ ax = axes6[2]
894
+ probe = pr_objs.get("drift")
895
+ if probe:
896
+ s = probe.predict_proba(X_b)[:, 1]
897
+ ax.hist(s[is_d == 0], 30, alpha=0.6, color=P["unc"], density=True,
898
+ label="Stable")
899
+ ax.hist(s[is_d == 1], 30, alpha=0.6, color=P["drift"], density=True,
900
+ label="Drifted")
901
+ ax.set(xlabel="Drift score", ylabel="Density",
902
+ title="Score distribution")
903
+ ax.legend()
904
+ ax.grid(alpha=0.3)
905
+ plt.tight_layout()
906
+ plt.savefig(fig_dir / "fig8_calibration.png", dpi=300, bbox_inches="tight")
907
+ plt.close()
908
+ logger.info(" fig8 saved")
909
+
910
+ # ── Fig 10: Temporal distance ────────────────────────────────────────
911
+ if temporal_data and len(temporal_data.get("bins", [])) > 10:
912
+ bins_ = np.array(temporal_data["bins"])
913
+ scrs_ = np.array(temporal_data["scores"])
914
+ fig, ax = plt.subplots(figsize=(10, 6))
915
+ be = np.linspace(bins_.min(), bins_.max(), 10)
916
+ bm = (be[:-1] + be[1:]) / 2
917
+ bmean = [scrs_[(bins_ >= lo) & (bins_ < hi)].mean()
918
+ if ((bins_ >= lo) & (bins_ < hi)).sum() > 0 else np.nan
919
+ for lo, hi in zip(be[:-1], be[1:])]
920
+ ax.scatter(bins_, scrs_, alpha=0.3, s=15, color=P["drift"])
921
+ ax.plot(bm, bmean, "o-", color="black", lw=2.5, ms=8,
922
+ label=f"Bin mean (r={temporal_data['correlation']:.3f})")
923
+ ax.axvline(0, color="gray", ls="--", alpha=0.6, label="Cutoff")
924
+ ax.set(xlabel="Months since cutoff",
925
+ ylabel="Drift probe score",
926
+ title=f"[{model_key}] [NEW-E] Temporal Distance")
927
+ ax.legend()
928
+ ax.grid(alpha=0.3)
929
+ plt.tight_layout()
930
+ plt.savefig(fig_dir / "fig10_temporal_distance.png",
931
+ dpi=300, bbox_inches="tight")
932
+ plt.close()
933
+ logger.info(" fig10 saved")
934
+
935
+ # ── Fig 11: Per-relation heatmap ─────────────────────────────────────
936
+ bl = best
937
+ pr_d = all_lr[bl].get("per_relation", {})
938
+ if len(pr_d) >= 3:
939
+ rels = sorted(pr_d.keys())
940
+ d_au = [pr_d[r]["drift_auroc"] for r in rels]
941
+ c_au = [pr_d[r]["correct_auroc"] for r in rels]
942
+ nd_ = [pr_d[r]["n_drifted"] for r in rels]
943
+ ns_ = [pr_d[r]["n_stable"] for r in rels]
944
+ fig, axes7 = plt.subplots(1, 2, figsize=(18, 7))
945
+ fig.suptitle(f"[{model_key}] Per-Relation AUROC (Layer {bl})",
946
+ fontsize=14, fontweight="bold")
947
+ x = np.arange(len(rels))
948
+ w = 0.35
949
+ axes7[0].bar(x - w / 2, d_au, w, color=P["drift"], edgecolor="black",
950
+ lw=0.5, label="Drift")
951
+ axes7[0].bar(x + w / 2, c_au, w, color=P["corr"], edgecolor="black",
952
+ lw=0.5, label="Correctness")
953
+ axes7[0].set_xticks(x)
954
+ axes7[0].set_xticklabels(rels, rotation=35, ha="right", fontsize=9)
955
+ axes7[0].axhline(0.5, color="gray", ls="--", alpha=0.5)
956
+ axes7[0].set(ylabel="AUROC", title="AUROC by relation")
957
+ axes7[0].legend()
958
+ axes7[0].grid(alpha=0.3, axis="y")
959
+ axes7[1].barh(rels, nd_, color=P["drift"], alpha=0.7, label="Drifted")
960
+ axes7[1].barh(rels, ns_, left=nd_, color=P["unc"], alpha=0.7,
961
+ label="Stable")
962
+ axes7[1].set(xlabel="Count", title="Class balance")
963
+ axes7[1].legend()
964
+ axes7[1].grid(alpha=0.3, axis="x")
965
+ plt.tight_layout()
966
+ plt.savefig(fig_dir / "fig11_relation_heatmap.png",
967
+ dpi=300, bbox_inches="tight")
968
+ plt.close()
969
+ logger.info(" fig11 saved")
970
+
971
+ logger.info(f"All figures -> {fig_dir}")
972
+
973
+
974
+ # ─────────────────────────────────────────────────────────────────────────────
975
+ # MAIN
976
+ # ─────────────────────────────────────────────────────────────────────────────
977
+
978
def run(model_key, cfg, output_dir, probe_device, layers_override,
        n_permutations, max_iter, cv_max_iter,
        skip_logit_lens, skip_sparsity, skip_permutation):
    """Full per-model disentanglement analysis pipeline.

    Loads cached hidden states for ``model_key`` (produced by
    extract_models.py), fits per-layer probes with checkpoint/resume,
    then runs the follow-up analyses (direction stability, logit lens,
    permutation test, sparsity curve, temporal distance), regenerates
    figures, exports the probe bundle, and writes final_results.json.

    Args:
        model_key: Key into cfg["models"] selecting the model.
        cfg: Parsed models.yaml configuration dict.
        output_dir: Root output directory; results go to output_dir/model_key.
        probe_device: Device string used for probe fitting.
        layers_override: Optional explicit list of layer indices; None = all.
        n_permutations: Number of label permutations for the permutation test.
        max_iter: Max iterations for the final probe fit.
        cv_max_iter: Max iterations during the cross-validated lambda search.
        skip_logit_lens: Skip the logit-lens analysis.
        skip_sparsity: Skip the sparsity-curve analysis.
        skip_permutation: Skip the permutation test.

    Returns:
        (results, best_layer): the loaded sample list and the layer index
        with the highest drift AUROC.
    """

    mcfg = cfg["models"][model_key]
    defaults = cfg.get("defaults", {})
    # Regularisation grids fall back to hard-coded defaults when absent
    # from the config.
    lambda_grid = defaults.get("lambda_grid",
                               [1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2])
    sparsity_lambdas = defaults.get("sparsity_lambdas",
                                    [1e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3,
                                     5e-3, 1e-2, 5e-2, 0.1, 0.2])
    cutoff_months = mcfg.get("cutoff_months", 48)

    model_dir = Path(output_dir) / model_key
    model_dir.mkdir(parents=True, exist_ok=True)
    (model_dir / "per_layer").mkdir(exist_ok=True)

    # Mirror log output to a per-model file in addition to the console.
    fh = logging.FileHandler(model_dir / "analysis.log")
    fh.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
    logger.addHandler(fh)

    # Load cache (hidden states extracted by extract_models.py).
    cache_path = model_dir / f"cached_{model_key}.npz"
    if not cache_path.exists():
        logger.error(f"Cache not found: {cache_path}")
        logger.error("Run extract_models.py first.")
        sys.exit(1)

    logger.info(f"\n{'='*70}")
    logger.info(f" {model_key} — Analysis")
    logger.info(f"{'='*70}")
    logger.info(f"Loading cache: {cache_path}")
    results = np.load(str(cache_path), allow_pickle=True)["results"].tolist()
    n_d = sum(1 for r in results if r["is_drifted"])
    logger.info(f" {len(results)} samples ({n_d} drifted, {len(results)-n_d} stable)")

    # Determine layers: highest layer index present in any sample, + 1.
    num_layers = max(max(r["hidden_states"].keys()) for r in results) + 1
    all_layers = layers_override or list(range(num_layers))

    # Resume from checkpoint (JSON keys are strings; convert back to int).
    all_lr = {}
    rj = model_dir / "all_layer_results.json"
    if rj.exists():
        with open(rj) as f:
            saved = json.load(f)
        all_lr = {int(k): v for k, v in saved.items()}
        logger.info(f"Resumed: {len(all_lr)} layers done")

    # Layer loop: fit probes per layer, log metrics, checkpoint after each.
    for layer in all_layers:
        # Skip layers already analysed in a previous (resumed) run.
        if layer in all_lr and "drift_auroc" in all_lr[layer]:
            logger.info(f"L{layer}: skip (AUROC={all_lr[layer]['drift_auroc']:.4f})")
            continue

        logger.info(f"\n── Layer {layer}/{num_layers-1} ──")
        res = analyze_layer(layer, results, probe_device, lambda_grid,
                            max_iter, cv_max_iter)

        logger.info(
            f" Drift={res['drift_auroc']:.4f} "
            f"(lam={res['drift_lam']:.0e}, act={res['n_active_drift']}) "
            f"Unc={res['uncertainty_auroc']:.4f} "
            f"Corr={res['correctness_auroc']:.4f}")
        logger.info(
            f" cos(d,u)={res['cos_du']:+.4f} "
            f"cos(d,c)={res['cos_dc']:+.4f} "
            f"null_space={res['null_space_drift_auroc']:.4f} "
            f"drop={res['drift_auroc']-res['null_space_drift_auroc']:+.4f}")
        logger.info(
            f" Jaccard(d,u)={res['jaccard_du']:.2%} "
            f"Jaccard(d,c)={res['jaccard_dc']:.2%} "
            f"t={res['elapsed_s']:.1f}s")

        # Report the "confident but drifted" 2x2 cell when populated.
        cb = res["cells"].get("B_confident_drifted", {})
        if cb.get("n", 0) > 0:
            logger.info(
                f" [CellB] drift={cb['drift_mean']:.3f} "
                f"corr={cb['correct_mean']:.3f} "
                f"d_flag={cb['drift_flag_rate']:.1%} "
                f"c_flag={cb['correct_flag_rate']:.1%}")

        # Save (strip "_"-prefixed entries: weight vectors / fitted probe
        # objects are not JSON-serialisable).
        save_res = {k: v for k, v in res.items() if not k.startswith("_")}
        all_lr[layer] = res  # keep full result (incl. probes) for figures

        with open(model_dir / "per_layer" / f"layer_{layer:02d}.json", "w") as f:
            json.dump(save_res, f, indent=2, default=str)
        with open(rj, "w") as f:
            json.dump({int(k): {kk: vv for kk, vv in v.items()
                                if not kk.startswith("_")}
                       for k, v in all_lr.items()},
                      f, indent=2, default=str)

        # Incremental figures (regenerated after every completed layer)
        save_figures(all_lr, str(model_dir), model_key, results)

    # Best layer by drift AUROC.
    best_layer = int(max(all_lr, key=lambda l: all_lr[l]["drift_auroc"]))
    logger.info(f"\nBest layer: {best_layer} "
                f"AUROC={all_lr[best_layer]['drift_auroc']:.4f}")

    # Re-fit best layer for full probe objects if needed: layers restored
    # from the JSON checkpoint only carry the JSON-safe fields.
    if "_w_drift" not in all_lr[best_layer]:
        logger.info("Re-fitting best layer...")
        all_lr[best_layer] = analyze_layer(
            best_layer, results, probe_device, lambda_grid,
            max_iter, cv_max_iter)

    # [NEW-B] Direction stability: ensure every layer has a drift weight
    # vector, then measure cross-layer cosine similarity of directions.
    logger.info("[NEW-B] Direction stability...")
    for l in all_layers:
        if "_w_drift" not in all_lr.get(l, {}):
            X = np.array([r["hidden_states"][l] for r in results])
            y = np.array([int(r["is_drifted"]) for r in results])
            pw = fit_probe(X, y, 1e-3, probe_device, 400)
            if l not in all_lr:
                all_lr[l] = {}
            all_lr[l]["_w_drift"] = pw.w_np
    stability_data = probe_direction_stability(all_lr)
    # Mean of the off-diagonal entries of the layer-by-layer cosine matrix.
    mean_cos = float(np.mean(
        stability_data[1][~np.eye(len(stability_data[1]), dtype=bool)]))
    logger.info(f" Mean cross-layer cosine: {mean_cos:.4f}")

    # [NEW-C] Logit lens (optional)
    lens_data = None
    if not skip_logit_lens:
        logger.info("[NEW-C] Logit lens...")
        lens_data = logit_lens_analysis(results, str(model_dir),
                                        model_key, probe_device)

    # [PERM] Permutation test on the best layer (optional)
    perm_res = None
    if not skip_permutation:
        perm_res = permutation_test(results, best_layer, n_permutations,
                                    probe_device, lambda_grid)

    # [SPARSE] Sparsity/AUROC tradeoff curve (optional)
    sparsity_data = None
    if not skip_sparsity:
        sparsity_data = sparsity_curve(results, best_layer, probe_device,
                                       sparsity_lambdas)

    # [NEW-E] Temporal distance: score every sample with the best-layer
    # drift probe (mutates `results` in place with "_drift_score"), then
    # relate scores to months since the training cutoff.
    X_all = np.array([r["hidden_states"][best_layer] for r in results])
    dp = all_lr[best_layer]["_probes"]["drift"]
    scores = dp.predict_proba(X_all)[:, 1]
    for r, sc in zip(results, scores):
        r["_drift_score"] = float(sc)
    temporal_data = temporal_distance_analysis(results, cutoff_months)

    # Final figures (now including the optional-analysis panels)
    save_figures(all_lr, str(model_dir), model_key, results,
                 stability_data=stability_data, lens_data=lens_data,
                 sparsity_data=sparsity_data, temporal_data=temporal_data)

    # Export probe bundle for downstream use.
    export_probe_bundle(all_lr, best_layer, results, model_key, str(model_dir))

    # Final human-readable summary on stdout.
    bl = all_lr[best_layer]
    drop = bl["drift_auroc"] - bl["null_space_drift_auroc"]
    print(f"\n{'='*70}")
    print(f" [{model_key}] FINAL RESULTS")
    print(f"{'='*70}")
    print(f" Best layer: {best_layer}")
    print(f" Drift AUROC: {bl['drift_auroc']:.4f}")
    print(f" Uncertainty AUROC: {bl['uncertainty_auroc']:.4f}")
    print(f" Correctness AUROC: {bl['correctness_auroc']:.4f}")
    print(f" Null-space drift AUROC: {bl['null_space_drift_auroc']:.4f}")
    print(f" cos(drift, unc): {bl['cos_du']:+.4f}")
    print(f" cos(drift, corr): {bl['cos_dc']:+.4f}")
    print(f" Active neurons (drift): {bl['n_active_drift']}")
    print(f" Jaccard(drift, unc): {bl['jaccard_du']:.2%}")
    print(f" Null-space drop: {drop:+.4f}")
    print(f" Mean cross-layer cos: {mean_cos:.4f}")
    if perm_res:
        print(f" Permutation p-value: {perm_res['p_value']:.6f}")
    cb = bl.get("cells", {}).get("B_confident_drifted", {})
    if cb.get("n", 0) > 0:
        print(f"\n [IDEA-1] Cell B (n={cb['n']}):")
        print(f" Drift: {cb['drift_mean']:.3f} "
              f"(flagged {cb['drift_flag_rate']:.1%})")
        print(f" Correctness: {cb['correct_mean']:.3f} "
              f"(flagged {cb['correct_flag_rate']:.1%})")
        if cb["drift_mean"] > 0.55 and cb["correct_mean"] < 0.55:
            print(" DISSOCIATION CONFIRMED")
    print()
    # Headline qualitative checks against fixed thresholds.
    if abs(bl["cos_du"]) < 0.1:
        print(" ORTHOGONAL: |cos(drift,unc)| < 0.10")
    if abs(bl["cos_dc"]) < 0.3:
        print(" DISTINCT: |cos(drift,corr)| < 0.30")
    if abs(drop) < 0.02:
        print(f" NULL-SPACE STABLE: drop={drop:+.4f}")
    if mean_cos > 0.5:
        print(f" GLOBAL DIRECTION: mean_cos={mean_cos:.3f}")
    print(f"{'='*70}")

    # Save the final machine-readable summary.
    final = {
        "model_key": model_key,
        "model_name": mcfg["name"],
        "n_samples": len(results),
        "n_drifted": n_d,
        "best_layer": best_layer,
        "best_layer_results": {k: v for k, v in bl.items()
                               if not k.startswith("_")},
        "permutation_test": perm_res,
        "sparsity_curve": sparsity_data,
        "logit_lens": lens_data,
        "temporal_distance": temporal_data,
        "probe_stability": {
            "layers": stability_data[0],
            "mean_cross_layer_cosine": float(mean_cos),
        },
        "timestamp": datetime.now().isoformat(),
    }
    with open(model_dir / "final_results.json", "w") as f:
        json.dump(final, f, indent=2, default=str)

    logger.info(f"\nAll results saved to {model_dir}")
    return results, best_layer
1200
+
1201
+
1202
def main():
    """CLI entry point: parse arguments, resolve config defaults, run analysis.

    Numeric options left unset on the command line fall back to the
    ``defaults`` section of the YAML config, then to hard-coded values.
    """
    # NOTE: the previous version had an unused `import sys` here; removed.
    p = argparse.ArgumentParser(
        description="Per-model disentanglement analysis",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument("--model", required=True,
                   help="Model key from models.yaml")
    p.add_argument("--config", default="models.yaml")
    p.add_argument("--output_dir", default=None)
    p.add_argument("--probe_device", default="cuda:0")
    p.add_argument("--layers", type=int, nargs="+", default=None)
    p.add_argument("--n_permutations", type=int, default=None)
    p.add_argument("--max_iter", type=int, default=None)
    p.add_argument("--cv_max_iter", type=int, default=None)
    p.add_argument("--skip_logit_lens", action="store_true")
    p.add_argument("--skip_sparsity", action="store_true")
    p.add_argument("--skip_permutation", action="store_true")
    args = p.parse_args()

    cfg = load_config(args.config)
    defaults = cfg.get("defaults", {})
    output_dir = args.output_dir or defaults.get("output_dir",
                                                 "data/experiments/v4")
    # BUG FIX: use explicit None checks so a user-supplied 0 is honoured
    # rather than silently replaced by the config default (the previous
    # `args.x or default` pattern treated 0 as "unset").
    n_perms = (args.n_permutations if args.n_permutations is not None
               else defaults.get("n_permutations", 1000))
    max_iter = (args.max_iter if args.max_iter is not None
                else defaults.get("max_iter", 2000))
    cv_max_iter = (args.cv_max_iter if args.cv_max_iter is not None
                   else defaults.get("cv_max_iter", 500))

    run(args.model, cfg, output_dir, args.probe_device, args.layers,
        n_perms, max_iter, cv_max_iter,
        args.skip_logit_lens, args.skip_sparsity, args.skip_permutation)


if __name__ == "__main__":
    main()
collect_all.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Knowledge Drift Dataset - Master Collection Script
3
+ ====================================================
4
+ Orchestrates all data collection and produces the final unified dataset.
5
+
6
+ Usage:
7
+ python collect_all.py --all # Run everything
8
+ python collect_all.py --static # Only static/manual facts
9
+ python collect_all.py --wikidata # Only Wikidata queries
10
+ python collect_all.py --merge # Merge existing raw files
11
+ python collect_all.py --stats # Print dataset statistics
12
+ """
13
+
14
+ import argparse
15
+ import json
16
+ import os
17
+ import logging
18
+ from datetime import datetime
19
+ from typing import Dict, List
20
+ from collections import Counter
21
+
22
+ import sys
23
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
24
+ sys.path.insert(0, SCRIPT_DIR)
25
+
26
+ logging.basicConfig(
27
+ level=logging.INFO,
28
+ format='%(asctime)s - %(levelname)s - %(message)s'
29
+ )
30
+ logger = logging.getLogger(__name__)
31
+
32
+ DATA_DIR = os.path.join(SCRIPT_DIR, "data")
33
+
34
+
35
def ensure_data_dir():
    """Create the data output directory if it does not already exist."""
    os.makedirs(DATA_DIR, exist_ok=True)
37
+
38
+
39
def collect_static():
    """Run static/manual facts collection and persist the result.

    Imports the collector lazily, writes the collected dict to
    data/static_facts.json, and returns it for further processing.
    """
    from collectors.static_collector import collect_all_static

    banner = "=" * 60
    logger.info(banner)
    logger.info("COLLECTING STATIC/MANUAL FACTS")
    logger.info(banner)

    static_facts = collect_all_static()

    out_path = os.path.join(DATA_DIR, "static_facts.json")
    with open(out_path, 'w', encoding='utf-8') as fh:
        json.dump(static_facts, fh, indent=2, ensure_ascii=False)

    logger.info(f"Static facts saved to {out_path}")
    return static_facts
55
+
56
+
57
def collect_wikidata():
    """Run the Wikidata SPARQL collection and persist the result.

    Imports the collector lazily, writes the collected dict to
    data/wikidata_raw.json, and returns it for further processing.
    """
    from collectors.wikidata_collector import collect_all_wikidata

    banner = "=" * 60
    logger.info(banner)
    logger.info("COLLECTING FROM WIKIDATA")
    logger.info(banner)

    wikidata_facts = collect_all_wikidata()

    out_path = os.path.join(DATA_DIR, "wikidata_raw.json")
    with open(out_path, 'w', encoding='utf-8') as fh:
        json.dump(wikidata_facts, fh, indent=2, ensure_ascii=False)

    logger.info(f"Wikidata facts saved to {out_path}")
    return wikidata_facts
73
+
74
+
75
def flatten_to_query_level(all_data: Dict) -> List[Dict]:
    """
    Flatten the hierarchical dataset into individual query-level samples.
    Each sample is a single query at a single timestamp — ready for model
    inference:
        - Run model on query
        - Extract hidden states
        - Record model's answer
        - Compare with expected answer
        - Train/evaluate drift detection probe

    Args:
        all_data: Mapping of source key -> list of fact items. Items may
            carry pre-generated "temporal_variants", bilingual
            "temporal_variants_ar"/"temporal_variants_en" lists, or be raw
            Wikidata items with "templates".

    Returns:
        List of flat sample dicts with sequential "sample_id" values.
    """
    flat_samples: List[Dict] = []
    sample_id = 0
    variant_keys = ("temporal_variants", "temporal_variants_ar",
                    "temporal_variants_en")

    for source_key, items in all_data.items():
        for item in items:
            # ── Items with temporal variants already generated ──────────
            if "temporal_variants" in item:
                for variant in item["temporal_variants"]:
                    flat_samples.append({
                        "sample_id": f"q_{sample_id:06d}",
                        "query": variant["query"],
                        "expected_answer": variant.get("expected_answer", ""),
                        "year": variant.get("year", 0),
                        "temporal_zone": variant.get("temporal_zone", "unknown"),
                        "is_drifted_query": variant.get("is_drifted_query", False),
                        "model_likely_answer": variant.get("model_likely_answer", ""),
                        "language": variant.get("language", "en"),
                        # Metadata inherited from the parent item
                        "entity": item.get("entity", ""),
                        "relation": item.get("relation", ""),
                        "knowledge_type": item.get("knowledge_type", ""),
                        "category": item.get("category", ""),
                        "source": item.get("source", ""),
                        "parent_id": item.get("id", ""),
                        "drift_date": item.get("drift_date", ""),
                    })
                    sample_id += 1

            # ── Bilingual items (Arabic / English variant lists) ────────
            for lang_key in ("temporal_variants_ar", "temporal_variants_en"):
                if lang_key in item:
                    for variant in item[lang_key]:
                        flat_samples.append({
                            "sample_id": f"q_{sample_id:06d}",
                            "query": variant["query"],
                            "expected_answer": variant.get("expected_answer", ""),
                            "year": variant.get("year", 0),
                            "temporal_zone": variant.get("temporal_zone", "unknown"),
                            "is_drifted_query": variant.get("is_drifted_query", False),
                            "language": variant.get("language", "en"),
                            "entity": item.get("entity", item.get("entity_en", "")),
                            "relation": item.get("relation", item.get("relation_en", "")),
                            "knowledge_type": item.get("knowledge_type", ""),
                            "category": item.get("category", ""),
                            "source": item.get("source", ""),
                            "parent_id": item.get("id", ""),
                            "is_bilingual": True,
                            "drift_date": item.get("drift_date", ""),
                        })
                        sample_id += 1

            # ── Raw wikidata items with no variants at all ──────────────
            # BUG FIX: "temporal_variants_en" was previously not checked
            # here, so English-only bilingual items fell through to this
            # branch and were emitted a second time via templates.
            if not any(k in item for k in variant_keys):
                # Project-local import kept lazy so the variant paths above
                # do not require configs.config to be importable.
                from configs.config import TIMESTAMP_YEARS
                templates = item.get("templates", [])
                if not templates:
                    continue
                template = templates[0]

                for year in TIMESTAMP_YEARS:
                    temporal_zone = "pre_cutoff" if year < 2024 else (
                        "near_cutoff" if year == 2024 else "post_cutoff"
                    )

                    try:
                        query = template.format(
                            year=year,
                            subject=item.get("entity", ""),
                            object="___"
                        )
                    except (KeyError, IndexError):
                        query = f"In {year}, {item.get('entity', '')} {item.get('relation', '')} ___"

                    # Determine expected answer per category. Use .get with
                    # "" defaults throughout for robustness against
                    # partially-filled items (consistent with the rest of
                    # this function).
                    category = item.get("category", "")
                    if category == "unknown_drift":
                        if temporal_zone == "post_cutoff":
                            expected = item.get("new_answer", "")
                            model_likely = item.get("old_answer", "")
                            is_drifted = True
                        else:
                            expected = item.get("old_answer", "")
                            model_likely = ""
                            is_drifted = False
                    elif category == "known_drift":
                        # Change happened pre-cutoff: pick old vs new answer
                        # depending on whether the query year precedes the
                        # drift year (first 4 chars of an ISO-ish date).
                        drift_date = item.get("drift_date", "")
                        try:
                            drift_year = int(drift_date[:4]) if drift_date else 2023
                        except ValueError:
                            drift_year = 2023
                        expected = (item.get("old_answer", "") if year < drift_year
                                    else item.get("new_answer", ""))
                        model_likely = ""
                        is_drifted = False
                    else:  # no_drift
                        expected = item.get("answer", "")
                        model_likely = ""
                        is_drifted = False

                    flat_samples.append({
                        "sample_id": f"q_{sample_id:06d}",
                        "query": query,
                        "expected_answer": expected,
                        "year": year,
                        "temporal_zone": temporal_zone,
                        "is_drifted_query": is_drifted,
                        "model_likely_answer": model_likely,
                        "language": "en",
                        "entity": item.get("entity", ""),
                        "relation": item.get("relation", ""),
                        "knowledge_type": item.get("knowledge_type", ""),
                        "category": item.get("category", ""),
                        "source": item.get("source", ""),
                        "parent_id": item.get("entity_qid", ""),
                        "drift_date": item.get("drift_date", ""),
                    })
                    sample_id += 1

    return flat_samples
208
+
209
+
210
def merge_all():
    """Merge all collected raw files into the unified query-level dataset.

    Loads whichever raw collections exist on disk, flattens them to
    query-level samples, and writes both the full JSON dataset and a
    compact JSONL version. Returns the assembled dataset dict.
    """
    logger.info("=" * 60)
    logger.info("MERGING ALL DATA")
    logger.info("=" * 60)

    all_data = {}

    # Pull in whichever raw collections are present on disk.
    for filename, label in (("static_facts.json", "static facts"),
                            ("wikidata_raw.json", "wikidata facts")):
        raw_path = os.path.join(DATA_DIR, filename)
        if os.path.exists(raw_path):
            with open(raw_path, 'r', encoding='utf-8') as fh:
                all_data.update(json.load(fh))
            logger.info(f"Loaded {label} from {raw_path}")

    # Flatten to query level.
    flat_samples = flatten_to_query_level(all_data)

    # Assemble the final dataset with descriptive metadata.
    dataset = {
        "metadata": {
            "name": "Knowledge Drift Detection Dataset",
            "version": "1.0",
            "created": datetime.now().isoformat(),
            "model_target": "Qwen 2.5 7B",
            "model_cutoff": "2024-08-01",
            "description": (
                "Dataset for detecting knowledge drift in LLMs using "
                "mechanistic interpretability. Contains temporally-scoped "
                "factual queries across stable and changing knowledge."
            ),
            "categories": {
                "stable": "Category 1 - Facts that never change (control)",
                "known_drift": "Category 2 - Facts that changed pre-cutoff (model knows both)",
                "unknown_drift": "Category 3 - Facts that changed post-cutoff (model doesn't know)",
                "no_drift": "Category 4 - Changeable facts that didn't change (control)",
            },
            "total_samples": len(flat_samples),
        },
        "samples": flat_samples,
    }

    # Write the unified dataset.
    unified_path = os.path.join(DATA_DIR, "knowledge_drift_dataset.json")
    with open(unified_path, 'w', encoding='utf-8') as fh:
        json.dump(dataset, fh, indent=2, ensure_ascii=False)

    logger.info(f"Saved unified dataset to {unified_path}")
    logger.info(f"Total query-level samples: {len(flat_samples)}")

    # Also write a compact JSONL version for quick line-by-line loading.
    jsonl_path = os.path.join(DATA_DIR, "knowledge_drift_compact.jsonl")
    with open(jsonl_path, 'w', encoding='utf-8') as fh:
        fh.writelines(json.dumps(s, ensure_ascii=False) + '\n'
                      for s in flat_samples)

    logger.info(f"Saved compact JSONL to {jsonl_path}")

    return dataset
278
+
279
+
280
def print_stats():
    """Print detailed statistics about the merged dataset to stdout."""
    dataset_path = os.path.join(DATA_DIR, "knowledge_drift_dataset.json")

    if not os.path.exists(dataset_path):
        logger.error(f"Dataset not found at {dataset_path}. Run --merge first.")
        return

    with open(dataset_path, 'r', encoding='utf-8') as fh:
        dataset = json.load(fh)

    samples = dataset["samples"]
    total = len(samples)

    print("\n" + "=" * 70)
    print(" KNOWLEDGE DRIFT DATASET STATISTICS")
    print("=" * 70)

    print(f"\nTotal query-level samples: {total}")

    # Category breakdown with a simple bar chart.
    print(f"\n📊 By Category:")
    for cat, count in sorted(Counter(s["category"] for s in samples).items()):
        pct = 100 * count / total
        bar = "█" * int(pct / 2)
        print(f" {cat:20s}: {count:5d} ({pct:5.1f}%) {bar}")

    # Temporal zone breakdown.
    print(f"\n⏰ By Temporal Zone:")
    for tz, count in sorted(Counter(s["temporal_zone"] for s in samples).items()):
        pct = 100 * count / total
        print(f" {tz:20s}: {count:5d} ({pct:5.1f}%)")

    # Knowledge type breakdown.
    print(f"\n🧠 By Knowledge Type:")
    for kt, count in sorted(Counter(s["knowledge_type"] for s in samples).items()):
        pct = 100 * count / total
        print(f" {kt:20s}: {count:5d} ({pct:5.1f}%)")

    # Drifted-query summary (guard against an empty dataset).
    drifted = sum(1 for s in samples if s.get("is_drifted_query", False))
    print(f"\n🔄 Drifted Queries (post-cutoff, answer changed):")
    if total > 0:
        print(f" Total: {drifted} / {total} ({100*drifted/total:.1f}%)")
    else:
        print(f" Total: 0 (dataset is empty — check that --merge ran correctly)")

    # Language breakdown.
    print(f"\n🌍 By Language:")
    for lang, count in sorted(Counter(s.get("language", "en") for s in samples).items()):
        print(f" {lang:5s}: {count:5d}")

    # Source breakdown.
    print(f"\n📦 By Source:")
    for src, count in sorted(Counter(s.get("source", "unknown") for s in samples).items()):
        print(f" {src:25s}: {count:5d}")

    # Year breakdown with cutoff markers.
    print(f"\n📅 By Year:")
    for year, count in sorted(Counter(s.get("year", 0) for s in samples).items()):
        if year == 2024:
            marker = " ← CUTOFF"
        elif year == 2025:
            marker = " ← POST-CUTOFF (drift zone)"
        else:
            marker = ""
        print(f" {year}: {count:5d}{marker}")

    # Key sample-count comparisons for the probing experiments.
    print(f"\n🔬 Key Comparisons for Probing:")

    post_drifted = [s for s in samples
                    if s["temporal_zone"] == "post_cutoff" and s.get("is_drifted_query")]
    post_stable = [s for s in samples
                   if s["temporal_zone"] == "post_cutoff" and not s.get("is_drifted_query")]
    pre = [s for s in samples if s["temporal_zone"] == "pre_cutoff"]

    print(f" Comparison 1 (drift vs no-drift, both post-cutoff):")
    print(f" Post-cutoff DRIFTED: {len(post_drifted)}")
    print(f" Post-cutoff STABLE: {len(post_stable)}")

    print(f" Comparison 2 (pre vs post cutoff):")
    print(f" Pre-cutoff queries: {len(pre)}")
    print(f" Post-cutoff queries: {len(post_drifted) + len(post_stable)}")

    # One example query per category.
    print(f"\n📝 Example Queries:")

    for cat_name in ["stable", "known_drift", "unknown_drift", "no_drift"]:
        matching = [s for s in samples if s["category"] == cat_name]
        if matching:
            ex = matching[0]
            print(f"\n [{cat_name}]")
            print(f" Query: {ex['query']}")
            print(f" Answer: {ex['expected_answer']}")
            if ex.get('model_likely_answer'):
                print(f" Model likely says: {ex['model_likely_answer']}")

    print("\n" + "=" * 70)
+
378
+
379
def main():
    """Parse CLI flags and dispatch the requested collection stages.

    With no flags, behaves as if --all was given.
    """
    parser = argparse.ArgumentParser(description="Knowledge Drift Dataset Collection")
    for flag, help_text in (
        ("--all", "Run everything"),
        ("--static", "Collect static facts only"),
        ("--wikidata", "Collect from Wikidata only"),
        ("--merge", "Merge existing raw files"),
        ("--stats", "Print dataset statistics"),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)

    args = parser.parse_args()

    # No flags at all means run the full pipeline.
    if not (args.all or args.static or args.wikidata or args.merge or args.stats):
        args.all = True

    ensure_data_dir()

    if args.all or args.static:
        collect_static()

    if args.all or args.wikidata:
        collect_wikidata()

    if args.all or args.merge:
        merge_all()

    if args.all or args.stats:
        print_stats()


if __name__ == "__main__":
    main()
collectors/__init__.py ADDED
File without changes
collectors/static_collector.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Static Facts Collector
3
+ ======================
4
+ Processes manually curated facts:
5
+ - Category 1: Stable/timeless facts
6
+ - Category 3 supplements: Verified post-cutoff changes
7
+ - Category 4 supplements: Verified post-cutoff unchanged facts
8
+ - Category 5: Arabic/MENA knowledge
9
+ """
10
+
11
+ import json
12
+ import os
13
+ import logging
14
+ from typing import List, Dict
15
+
16
+ import sys
17
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
18
+ PROJECT_DIR = os.path.dirname(SCRIPT_DIR)
19
+ sys.path.insert(0, PROJECT_DIR)
20
+ from configs.config import (
21
+ STABLE_FACTS, VERIFIED_POST_CUTOFF_CHANGES,
22
+ VERIFIED_UNCHANGED_POST_CUTOFF, ARABIC_KNOWLEDGE,
23
+ TIMESTAMP_YEARS
24
+ )
25
+
26
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
27
+ logger = logging.getLogger(__name__)
28
+
29
+
30
def _temporal_zone(year: int) -> str:
    """Classify a year relative to the model knowledge cutoff (August 2024)."""
    if year < 2024:
        return "pre_cutoff"
    return "near_cutoff" if year == 2024 else "post_cutoff"


def _stable_sample(sample_id: int, entity: str, relation: str,
                   knowledge_type: str, answer: str,
                   variants: List[Dict]) -> Dict:
    """Assemble one Category-1 (stable) sample record."""
    return {
        "id": f"stable_{sample_id:04d}",
        "entity": entity,
        "relation": relation,
        "knowledge_type": knowledge_type,
        "category": "stable",
        "answer": answer,
        "temporal_variants": variants,
        "source": "manual_curated",
    }


def collect_stable_facts() -> List[Dict]:
    """
    Category 1: Facts that never change.

    Generates one temporal variant per TIMESTAMP_YEARS entry for each fact.
    Facts with a "pairs" key are parameterized templates (e.g. the capital of
    {country}) and expand into one sample per (subject, answer) pair; plain
    facts yield a single sample.

    Returns:
        List of sample dicts, each carrying a `temporal_variants` list.
    """
    samples: List[Dict] = []
    sample_id = 0

    for knowledge_type, facts in STABLE_FACTS.items():
        for fact in facts:
            if "pairs" in fact:
                # Parameterized fact: substitute both placeholders.
                for subject, answer in fact["pairs"]:
                    base_query = (fact["query"]
                                  .replace("{country}", subject)
                                  .replace("{continent}", answer))
                    variants = []
                    for year in TIMESTAMP_YEARS:
                        # Past years get past-tense phrasing; 2025+ stays present tense.
                        query = (base_query.replace(" is ___", " was ___")
                                 if year < 2025 else base_query)
                        variants.append({
                            "query": f"In {year}, " + query,
                            "year": year,
                            "temporal_zone": _temporal_zone(year),
                            "expected_answer": answer,
                            "is_drifted_query": False,
                        })
                    samples.append(_stable_sample(
                        sample_id, subject,
                        fact["query"].split("{")[0].strip(),
                        knowledge_type, answer, variants,
                    ))
                    sample_id += 1
            else:
                # Single self-contained fact (query is lowercased after the
                # year prefix, matching the original curation convention).
                variants = []
                for year in TIMESTAMP_YEARS:
                    variants.append({
                        "query": f"In {year}, " + fact["query"].lower(),
                        "year": year,
                        "temporal_zone": _temporal_zone(year),
                        "expected_answer": fact["answer"],
                        "is_drifted_query": False,
                    })
                samples.append(_stable_sample(
                    sample_id,
                    fact["query"].split("___")[0].strip(),
                    "fact", knowledge_type, fact["answer"], variants,
                ))
                sample_id += 1

    logger.info(f"Collected {len(samples)} stable facts (Category 1)")
    return samples
107
+
108
+
109
def collect_verified_post_cutoff_changes() -> List[Dict]:
    """
    Category 3 (supplement): hand-verified facts that changed after the cutoff.

    Each sample carries its pre-cutoff history plus a single 2025 probe where
    the ground-truth answer differs from what the model likely memorized.
    """
    samples = []

    for idx, fact in enumerate(VERIFIED_POST_CUTOFF_CHANGES):
        template = fact["templates"][0]

        # Pre-cutoff history: queries the model should still answer correctly.
        variants = [
            {
                "query": template.format(year=hist["year"]),
                "year": hist["year"],
                "temporal_zone": "pre_cutoff",
                "expected_answer": hist["answer"],
                "is_drifted_query": False,
            }
            for hist in fact.get("pre_cutoff_history", [])
        ]

        # The post-cutoff probe: ground truth is the new answer, but the
        # model probably still produces the old one.
        variants.append({
            "query": template.format(year=2025),
            "year": 2025,
            "temporal_zone": "post_cutoff",
            "expected_answer": fact["new_answer"],
            "model_likely_answer": fact["old_answer"],
            "is_drifted_query": fact["old_answer"] != fact["new_answer"],
        })

        samples.append({
            "id": f"verified_drift_{idx:04d}",
            "entity": fact["entity"],
            "relation": fact["relation"],
            "knowledge_type": fact["knowledge_type"],
            "category": "unknown_drift",
            "old_answer": fact["old_answer"],
            "new_answer": fact["new_answer"],
            "drift_date": fact.get("change_date", ""),
            "temporal_variants": variants,
            "all_templates": fact["templates"],
            "note": fact.get("note", ""),
            "source": "manual_verified",
        })

    logger.info(f"Collected {len(samples)} verified post-cutoff changes (Category 3)")
    return samples
162
+
163
+
164
def collect_verified_unchanged() -> List[Dict]:
    """
    Category 4 (supplement): hand-verified facts that did NOT change after the
    cutoff — the stable contrast set for drift detection.
    """
    samples = []

    for idx, fact in enumerate(VERIFIED_UNCHANGED_POST_CUTOFF):
        template = fact["templates"][0]

        variants = []
        for year in TIMESTAMP_YEARS:
            # Zone relative to the model cutoff (August 2024).
            if year < 2024:
                zone = "pre_cutoff"
            elif year == 2024:
                zone = "near_cutoff"
            else:
                zone = "post_cutoff"

            variants.append({
                "query": template.format(year=year),
                "year": year,
                "temporal_zone": zone,
                "expected_answer": fact["answer"],
                "is_drifted_query": False,
            })

        samples.append({
            "id": f"verified_stable_{idx:04d}",
            "entity": fact["entity"],
            "relation": fact["relation"],
            "knowledge_type": fact["knowledge_type"],
            "category": "no_drift",
            "answer": fact["answer"],
            "stable_since": fact["stable_since"],
            "temporal_variants": variants,
            "all_templates": fact["templates"],
            "source": "manual_verified",
        })

    logger.info(f"Collected {len(samples)} verified unchanged facts (Category 4)")
    return samples
205
+
206
+
207
def collect_arabic_knowledge() -> List[Dict]:
    """
    Category 5: Arabic/MENA-specific knowledge.
    Bilingual queries (Arabic + English) for cross-lingual drift detection.

    Each config item may declare `templates_ar` and/or `templates_en`; one
    variant is generated per template per year in TIMESTAMP_YEARS, kept in
    separate per-language lists on the sample.

    Returns:
        List of sample dicts ("no_drift" for stable items, "unknown_drift"
        for items marked as changed).
    """
    samples = []
    sample_id = 0

    for category, items in ARABIC_KNOWLEDGE.items():
        for item in items:
            temporal_variants_ar = []
            temporal_variants_en = []

            for year in TIMESTAMP_YEARS:
                # Zone relative to the model cutoff (August 2024).
                temporal_zone = "pre_cutoff" if year < 2024 else (
                    "near_cutoff" if year == 2024 else "post_cutoff"
                )

                # Determine expected answer based on stability
                if item.get("stable", True):
                    # Stable item: one answer per language for every year.
                    # NOTE(review): fallback chains assume items carry either a
                    # single "answer" or per-language keys — confirm in config.
                    answer_ar = item.get("answer", item.get("answer_ar", ""))
                    answer_en = item.get("answer_en", item.get("answer", ""))
                    is_drifted = False
                else:
                    # Changed item: choose old vs new answer around change_date.
                    change_date = item.get("change_date", "2024-08-01")
                    try:
                        change_year = int(change_date[:4])
                    except (ValueError, TypeError):
                        change_year = 2024  # unparsable date: assume cutoff year

                    if year < change_year or (year == change_year and temporal_zone == "pre_cutoff"):
                        answer_ar = item.get("old_answer_ar", "")
                        answer_en = item.get("old_answer_en", "")
                        is_drifted = False
                    else:
                        answer_ar = item.get("new_answer_ar", "")
                        answer_en = item.get("new_answer_en", "")
                        is_drifted = True

                # Arabic variants
                if "templates_ar" in item:
                    for template_ar in item["templates_ar"]:
                        temporal_variants_ar.append({
                            "query": template_ar.format(year=year),
                            "year": year,
                            "language": "ar",
                            "temporal_zone": temporal_zone,
                            # Only post-cutoff queries count as drift probes.
                            "expected_answer": answer_ar,
                            "is_drifted_query": is_drifted and temporal_zone == "post_cutoff",
                        })

                # English variants
                if "templates_en" in item:
                    for template_en in item["templates_en"]:
                        temporal_variants_en.append({
                            "query": template_en.format(year=year),
                            "year": year,
                            "language": "en",
                            "temporal_zone": temporal_zone,
                            "expected_answer": answer_en,
                            "is_drifted_query": is_drifted and temporal_zone == "post_cutoff",
                        })

            is_stable = item.get("stable", True)
            sample = {
                "id": f"arabic_{sample_id:04d}",
                "entity": item.get("entity", ""),
                "entity_en": item.get("entity_en", ""),
                "relation": item.get("relation", ""),
                "relation_en": item.get("relation_en", ""),
                "knowledge_type": item.get("knowledge_type", category),
                "category": "no_drift" if is_stable else "unknown_drift",
                "is_bilingual": True,
                "temporal_variants_ar": temporal_variants_ar,
                "temporal_variants_en": temporal_variants_en,
                "note": item.get("note", ""),
                "source": "manual_arabic",
            }

            # Changed items additionally record both answers and the change date.
            if not is_stable:
                sample["old_answer_ar"] = item.get("old_answer_ar", "")
                sample["new_answer_ar"] = item.get("new_answer_ar", "")
                sample["old_answer_en"] = item.get("old_answer_en", "")
                sample["new_answer_en"] = item.get("new_answer_en", "")
                sample["drift_date"] = item.get("change_date", "")

            samples.append(sample)
            sample_id += 1

    logger.info(f"Collected {len(samples)} Arabic/MENA knowledge items (Category 5)")
    return samples
298
+
299
+
300
def collect_all_static() -> Dict[str, List]:
    """Run every manual/static collector and bundle the results by category."""
    collectors = {
        "category_1_stable": collect_stable_facts,
        "category_3_verified_drift": collect_verified_post_cutoff_changes,
        "category_4_verified_stable": collect_verified_unchanged,
        "category_5_arabic": collect_arabic_knowledge,
    }
    # Insertion order of the dict preserves the original category ordering.
    return {name: collector() for name, collector in collectors.items()}
308
+
309
+
310
if __name__ == "__main__":
    results = collect_all_static()

    # Persist the raw collection under the project's data/ directory.
    output_path = os.path.join(PROJECT_DIR, "data", "static_facts.json")
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    logger.info(f"Saved to {output_path}")

    # Summary: overall count, then a per-category breakdown.
    print(f"\nTotal static facts collected: {sum(len(v) for v in results.values())}")
    for cat, items in results.items():
        print(f" {cat}: {len(items)}")
collectors/wikidata_collector.py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Wikidata Temporal Facts Collector
3
+ =================================
4
+ Queries Wikidata SPARQL endpoint for facts that:
5
+ - Changed before model cutoff (Category 2: known drift)
6
+ - Changed after model cutoff (Category 3: unknown drift)
7
+ - Remained stable (Category 4: no drift)
8
+ """
9
+
10
+ import json
11
+ import os
12
+ import time
13
+ import requests
14
+ import logging
15
+ from typing import List, Dict, Optional
16
+ from datetime import datetime
17
+
18
+ import sys
19
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
20
+ PROJECT_DIR = os.path.dirname(SCRIPT_DIR)
21
+ sys.path.insert(0, PROJECT_DIR)
22
+ from configs.config import (
23
+ TEMPORAL_RELATIONS, MODEL_CUTOFF, WIKIDATA_ENDPOINT,
24
+ WIKIDATA_USER_AGENT, RESULTS_PER_QUERY,
25
+ SPARQL_POST_CUTOFF_CHANGES, SPARQL_PRE_CUTOFF_CHANGES,
26
+ SPARQL_STABLE_TEMPORAL, TIMESTAMP_YEARS
27
+ )
28
+
29
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
def query_wikidata(sparql_query: str, max_retries: int = 3) -> List[Dict]:
    """
    Execute a SPARQL query against Wikidata with retry logic.

    Args:
        sparql_query: Full SPARQL query text.
        max_retries: Number of attempts before giving up.

    Returns:
        The list of result bindings, or [] if every attempt failed.
    """
    headers = {
        'User-Agent': WIKIDATA_USER_AGENT,
        'Accept': 'application/json'
    }

    for attempt in range(max_retries):
        # Fix: don't sleep after the final attempt — there is no retry to
        # wait for, and the old code could burn up to 3 minutes for nothing.
        last_attempt = attempt == max_retries - 1
        try:
            response = requests.get(
                WIKIDATA_ENDPOINT,
                params={'query': sparql_query, 'format': 'json'},
                headers=headers,
                timeout=120
            )

            # HTTP 429: back off progressively longer each retry.
            if response.status_code == 429:
                if not last_attempt:
                    wait_time = 60 * (attempt + 1)
                    logger.warning(f"Rate limited. Waiting {wait_time}s...")
                    time.sleep(wait_time)
                continue

            response.raise_for_status()
            data = response.json()
            results = data.get('results', {}).get('bindings', [])
            logger.info(f"Query returned {len(results)} results")
            return results

        except requests.exceptions.Timeout:
            logger.warning(f"Timeout on attempt {attempt + 1}")
            if not last_attempt:
                time.sleep(30)
        except Exception as e:
            # Broad catch is deliberate: any transport/JSON error should
            # trigger a retry rather than abort the whole collection run.
            logger.error(f"Error on attempt {attempt + 1}: {e}")
            if not last_attempt:
                time.sleep(10)

    logger.error("All retries exhausted")
    return []
70
+
71
+
72
def parse_wikidata_result(result: Dict) -> Dict:
    """
    Flatten one SPARQL result binding into a {variable: plain value} dict.

    URI bindings are reduced to their trailing path segment (the QID);
    literal bindings keep their string value; any other binding type
    (e.g. blank nodes) is dropped.
    """
    parsed = {}
    for variable, binding in result.items():
        kind = binding['type']
        if kind == 'uri':
            parsed[variable] = binding['value'].rsplit('/', 1)[-1]
        elif kind == 'literal':
            parsed[variable] = binding['value']
    return parsed
82
+
83
+
84
def collect_post_cutoff_changes(relation: Dict) -> List[Dict]:
    """
    Category 3: Facts that changed AFTER the model cutoff — the model
    cannot know the new answer.
    """
    logger.info(f"Collecting post-cutoff changes for: {relation['label']}")

    sparql = SPARQL_POST_CUTOFF_CHANGES.format(
        prop=relation['property'],
        subject_type=relation['subject_type'],
        cutoff_date=MODEL_CUTOFF,
        limit=RESULTS_PER_QUERY
    )

    samples = []
    for binding in query_wikidata(sparql):
        row = parse_wikidata_result(binding)
        subject = row.get('subjectLabel', '')
        old_value = row.get('old_valueLabel', '')
        new_value = row.get('new_valueLabel', '')

        # Unresolved labels come back as bare QIDs — skip those rows.
        if subject.startswith('Q') or old_value.startswith('Q'):
            continue

        samples.append({
            "entity": subject,
            "entity_qid": row.get('subject', ''),
            "relation": relation['label'],
            "relation_property": relation['property'],
            "knowledge_type": relation['knowledge_type'],
            "category": "unknown_drift",  # Category 3
            "old_answer": old_value,
            "new_answer": new_value,
            "old_start": row.get('old_start', ''),
            "old_end": row.get('old_end', ''),
            "new_start": row.get('new_start', ''),
            "drift_date": row.get('old_end', ''),
            "model_likely_answer": old_value,  # the pre-change fact the model memorized
            "correct_answer_2025": new_value,
            "templates": relation['templates'],
            "source": "wikidata_sparql"
        })

    logger.info(f" Found {len(samples)} post-cutoff changes for {relation['label']}")
    return samples
134
+
135
+
136
def collect_pre_cutoff_changes(relation: Dict) -> List[Dict]:
    """
    Category 2: Facts that changed BEFORE the model cutoff — the model
    should know both the old and the new answer.
    """
    logger.info(f"Collecting pre-cutoff changes for: {relation['label']}")

    sparql = SPARQL_PRE_CUTOFF_CHANGES.format(
        prop=relation['property'],
        subject_type=relation['subject_type'],
        cutoff_date=MODEL_CUTOFF,
        limit=RESULTS_PER_QUERY
    )

    samples = []
    for binding in query_wikidata(sparql):
        row = parse_wikidata_result(binding)
        subject = row.get('subjectLabel', '')
        old_value = row.get('old_valueLabel', '')
        new_value = row.get('new_valueLabel', '')

        # Unresolved labels come back as bare QIDs — skip those rows.
        if subject.startswith('Q') or old_value.startswith('Q'):
            continue

        samples.append({
            "entity": subject,
            "entity_qid": row.get('subject', ''),
            "relation": relation['label'],
            "relation_property": relation['property'],
            "knowledge_type": relation['knowledge_type'],
            "category": "known_drift",  # Category 2
            "old_answer": old_value,
            "new_answer": new_value,
            "old_start": row.get('old_start', ''),
            "old_end": row.get('old_end', ''),
            "new_start": row.get('new_start', ''),
            "drift_date": row.get('old_end', ''),
            "templates": relation['templates'],
            "source": "wikidata_sparql"
        })

    logger.info(f" Found {len(samples)} pre-cutoff changes for {relation['label']}")
    return samples
183
+
184
+
185
def collect_stable_temporal(relation: Dict) -> List[Dict]:
    """
    Category 4: Facts of the same relation type that have NOT changed —
    the stable contrast for the changing facts.
    """
    logger.info(f"Collecting stable temporal facts for: {relation['label']}")

    sparql = SPARQL_STABLE_TEMPORAL.format(
        prop=relation['property'],
        subject_type=relation['subject_type'],
        cutoff_date=MODEL_CUTOFF,
        limit=RESULTS_PER_QUERY
    )

    samples = []
    for binding in query_wikidata(sparql):
        row = parse_wikidata_result(binding)
        subject = row.get('subjectLabel', '')
        value = row.get('valueLabel', '')

        # Unresolved labels come back as bare QIDs — skip those rows.
        if subject.startswith('Q') or value.startswith('Q'):
            continue

        samples.append({
            "entity": subject,
            "entity_qid": row.get('subject', ''),
            "relation": relation['label'],
            "relation_property": relation['property'],
            "knowledge_type": relation['knowledge_type'],
            "category": "no_drift",  # Category 4
            "answer": value,
            "stable_since": row.get('start_time', 'unknown'),
            "templates": relation['templates'],
            "source": "wikidata_sparql"
        })

    logger.info(f" Found {len(samples)} stable facts for {relation['label']}")
    return samples
227
+
228
+
229
def generate_temporal_variants(sample: Dict) -> List[Dict]:
    """
    Generate queries at different timestamps for a single sample.

    For each year in TIMESTAMP_YEARS, formats the sample's primary template
    into both a fill-in-the-blank question and an answer-completed statement,
    choosing the answer that was true in that year. This creates the temporal
    contrast needed for mechanistic interpretability.

    Fixes over the original: removes the dead `is_correct` local (assigned in
    every branch, never read) and a shadowed `model_likely_answer` local that
    was never used; narrows the date-parse except clause to ValueError (slicing
    a string cannot raise IndexError).

    Args:
        sample: Collected sample dict; must have 'category', and 'templates'
            drives formatting (an empty template list yields no variants).

    Returns:
        One variant dict per timestamp year ([] if the sample has no templates).
    """
    templates = sample.get('templates', [])
    if not templates:
        return []

    template = templates[0]  # use the primary template
    variants: List[Dict] = []

    for year in TIMESTAMP_YEARS:
        temporal_zone = "pre_cutoff" if year < 2024 else (
            "near_cutoff" if year == 2024 else "post_cutoff"
        )

        category = sample['category']
        if category == 'unknown_drift':
            # Changed after cutoff: old answer holds through 2024, new after.
            if year <= 2024:
                answer = sample.get('old_answer', sample.get('answer', ''))
            else:
                answer = sample.get('new_answer', '')
        elif category == 'known_drift':
            # Changed before cutoff: switch answers at the recorded end year.
            old_end = sample.get('old_end', '')
            answer = sample.get('new_answer', '')
            if old_end:
                try:
                    change_year = int(old_end[:4])
                    answer = sample['old_answer'] if year < change_year else sample['new_answer']
                except ValueError:
                    pass  # malformed date string: keep the new answer
        else:
            # 'no_drift' and any other category: single stable answer.
            answer = sample.get('answer', '')

        query_with_answer = template.format(
            year=year,
            subject=sample.get('entity', ''),
            object=answer
        )
        # Question version: answer slot left blank.
        query_question = template.format(
            year=year,
            subject=sample.get('entity', ''),
            object='___'
        )

        variant = {
            "query": query_question,
            "query_with_answer": query_with_answer,
            "year": year,
            "temporal_zone": temporal_zone,
            "expected_answer": answer,
            "entity": sample.get('entity', ''),
            "relation": sample.get('relation', ''),
            "knowledge_type": sample.get('knowledge_type', ''),
            "category": category,
        }

        # Only post-cutoff queries on unknown-drift samples probe drift.
        if category == 'unknown_drift' and year > 2024:
            variant["model_likely_answer"] = sample.get('old_answer', '')
            variant["is_drifted_query"] = True
        else:
            variant["is_drifted_query"] = False

        variants.append(variant)

    return variants
307
+
308
+
309
def collect_all_wikidata() -> Dict[str, List]:
    """Run all Wikidata collection queries and return results keyed by category."""
    all_results = {
        "category_2_known_drift": [],
        "category_3_unknown_drift": [],
        "category_4_no_drift": [],
    }

    # (bucket, collector) pairs, executed in the original order per relation:
    # post-cutoff (Cat 3), pre-cutoff (Cat 2), stable (Cat 4).
    stages = (
        ("category_3_unknown_drift", collect_post_cutoff_changes),
        ("category_2_known_drift", collect_pre_cutoff_changes),
        ("category_4_no_drift", collect_stable_temporal),
    )

    for relation in TEMPORAL_RELATIONS:
        logger.info(f"\n{'='*60}")
        logger.info(f"Processing relation: {relation['label']} ({relation['property']})")
        logger.info(f"{'='*60}")

        for bucket, collector in stages:
            all_results[bucket].extend(collector(relation))
            time.sleep(2)  # be nice to Wikidata between queries

    # Log summary
    logger.info(f"\n{'='*60}")
    logger.info("WIKIDATA COLLECTION SUMMARY")
    logger.info(f"{'='*60}")
    for cat, samples in all_results.items():
        logger.info(f" {cat}: {len(samples)} samples")

    return all_results
349
+
350
+
351
if __name__ == "__main__":
    results = collect_all_wikidata()

    # Persist the raw SPARQL results under the project's data/ directory.
    output_path = os.path.join(PROJECT_DIR, "data", "wikidata_raw.json")
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as out:
        json.dump(results, out, indent=2, ensure_ascii=False)
    logger.info(f"Saved to {output_path}")
collectors/wikipedia_collector.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Wikipedia-based Fact Collector (Alternative)
3
+ =============================================
4
+ Uses Wikipedia's API to collect current facts for verification.
5
+ Use this if Wikidata SPARQL endpoint is unavailable.
6
+
7
+ This script fetches current information from Wikipedia to VERIFY
8
+ whether facts have changed post-cutoff. It doesn't replace the
9
+ Wikidata collector but supplements it.
10
+
11
+ Usage:
12
+ python wikipedia_collector.py
13
+
14
+ NOTE: For full Wikidata SPARQL collection, run wikidata_collector.py
15
+ on your local machine where query.wikidata.org is accessible.
16
+ """
17
+
18
+ import json
19
+ import os
20
+ import re
21
+ import time
22
+ import logging
23
+ import requests
24
+ from typing import Dict, List, Optional
25
+
26
+ import sys
27
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
28
+ PROJECT_DIR = os.path.dirname(SCRIPT_DIR)
29
+ sys.path.insert(0, PROJECT_DIR)
30
+
31
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
32
+ logger = logging.getLogger(__name__)
33
+
34
+ WIKIPEDIA_API = "https://en.wikipedia.org/w/api.php"
35
+
36
+
37
def get_wikipedia_extract(title: str, sentences: int = 3) -> Optional[str]:
    """
    Fetch the plain-text opening extract of a Wikipedia article.

    Returns None when the article does not exist or the request fails.
    """
    params = {
        "action": "query",
        "titles": title,
        "prop": "extracts",
        "exsentences": sentences,
        "exlimit": 1,
        "explaintext": True,
        "format": "json",
    }

    try:
        response = requests.get(WIKIPEDIA_API, params=params, timeout=30)
        response.raise_for_status()
        pages = response.json().get("query", {}).get("pages", {})
        for page_id, page in pages.items():
            # The API reports a missing page under the sentinel id "-1".
            return None if page_id == "-1" else page.get("extract", "")
    except Exception as e:
        logger.error(f"Error fetching Wikipedia for '{title}': {e}")
    return None
62
+
63
+
64
def verify_entity_role_facts() -> List[Dict]:
    """
    Pull current Wikipedia extracts for the positions/companies we track.

    The extracts are raw text intended for MANUAL verification of the
    current holder — nothing is parsed automatically here.
    """
    # Entities to verify — these are our Category 3/4 candidates
    # (Wikipedia article title, what to look for, relation type)
    entities_to_check = [
        ("Prime_Minister_of_the_United_Kingdom", "prime minister", "head_of_government"),
        ("President_of_the_United_States", "president", "head_of_state"),
        ("Prime_Minister_of_Japan", "prime minister", "head_of_government"),
        ("Chancellor_of_Germany", "chancellor", "head_of_government"),
        ("Prime_Minister_of_Canada", "prime minister", "head_of_government"),
        ("Prime_Minister_of_India", "prime minister", "head_of_government"),
        ("President_of_France", "president", "head_of_state"),
        ("President_of_Russia", "president", "head_of_state"),
        ("President_of_China", "president", "head_of_state"),
        ("King_of_Saudi_Arabia", "king", "head_of_state"),
        ("President_of_the_United_Arab_Emirates", "president", "head_of_state"),
        ("President_of_Egypt", "president", "head_of_state"),
        ("President_of_South_Korea", "president", "head_of_state"),
        ("President_of_Brazil", "president", "head_of_state"),
        ("President_of_Syria", "president/leader", "head_of_state"),
        # Companies
        ("Apple_Inc.", "CEO", "ceo"),
        ("Microsoft", "CEO", "ceo"),
        ("Amazon_(company)", "CEO", "ceo"),
        ("Alphabet_Inc.", "CEO", "ceo"),
        ("Tesla,_Inc.", "CEO", "ceo"),
        ("Meta_Platforms", "CEO", "ceo"),
        ("OpenAI", "CEO", "ceo"),
    ]

    verified_facts = []
    for article_title, role, relation in entities_to_check:
        logger.info(f"Checking: {article_title}")

        extract = get_wikipedia_extract(article_title.replace("_", " "))
        if extract:
            verified_facts.append({
                "article": article_title,
                "role": role,
                "relation": relation,
                "wikipedia_extract": extract,
                "checked_date": "2025-02-25",
                "note": "Extract from Wikipedia — manually verify the current holder",
            })

        time.sleep(1)  # Be nice to Wikipedia

    return verified_facts
116
+
117
+
118
def collect_recent_events_from_wikipedia() -> List[Dict]:
    """
    Collect raw extracts from Wikipedia's Current Events portal for each
    post-cutoff month (after Aug 2024); processed manually downstream.
    """
    # Wikipedia Current Events pages by month
    months = [
        "September_2024", "October_2024", "November_2024",
        "December_2024", "January_2025", "February_2025"
    ]

    events = []
    for month in months:
        extract = get_wikipedia_extract(f"Portal:Current_events/{month}", sentences=10)
        if extract:
            events.append({
                "month": month,
                "extract": extract,
                "note": "Raw extract — process manually for specific fact changes",
            })
        time.sleep(1)

    return events
145
+
146
+
147
if __name__ == "__main__":
    logger.info("Verifying entity-role facts via Wikipedia...")
    verified = verify_entity_role_facts()

    # Persist the raw verification payload under the project's data/ directory.
    output_path = os.path.join(PROJECT_DIR, "data", "wikipedia_verified.json")
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as out:
        json.dump(verified, out, indent=2, ensure_ascii=False)
    logger.info(f"Saved {len(verified)} verified facts to {output_path}")

    # Human-readable summary of each fetched extract.
    banner = "=" * 60
    print("\n" + banner)
    print("WIKIPEDIA VERIFICATION RESULTS")
    print(banner)
    for fact in verified:
        print(f"\n📌 {fact['article'].replace('_', ' ')}")
        extract_preview = fact['wikipedia_extract'][:200] + "..."
        print(f" {extract_preview}")
configs/__init__.py ADDED
File without changes
configs/config.py ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Configuration for Knowledge Drift Dataset Collection
3
+ =====================================================
4
+ Model: Qwen 2.5 7B
5
+ Knowledge Cutoff: August 2024
6
+ Collection Date: February 2025
7
+ """
8
+
9
+ # ============================================================
10
+ # TEMPORAL BOUNDARIES
11
+ # ============================================================
12
+ MODEL_CUTOFF = "2024-08-01"
13
+ COLLECTION_DATE = "2025-02-25"
14
+
15
+ # Time periods for temporal variants
16
+ TIMESTAMP_YEARS = [2015, 2018, 2020, 2022, 2023, 2024, 2025]
17
+
18
+ # ============================================================
19
+ # WIKIDATA PROPERTIES FOR TEMPORAL FACTS
20
+ # ============================================================
21
+ TEMPORAL_RELATIONS = [
22
+ {
23
+ "property": "P6",
24
+ "label": "head of government",
25
+ "knowledge_type": "entity_role",
26
+ "templates": [
27
+ "In {year}, the head of government of {subject} was {object}.",
28
+ "Who was the head of government of {subject} in {year}?",
29
+ "As of {year}, {subject} was led by {object}.",
30
+ ],
31
+ "subject_type": "Q6256",
32
+ "example": "UK → PM"
33
+ },
34
+ {
35
+ "property": "P35",
36
+ "label": "head of state",
37
+ "knowledge_type": "entity_role",
38
+ "templates": [
39
+ "In {year}, the head of state of {subject} was {object}.",
40
+ "Who was the head of state of {subject} in {year}?",
41
+ ],
42
+ "subject_type": "Q6256",
43
+ "example": "US → President"
44
+ },
45
+ {
46
+ "property": "P169",
47
+ "label": "chief executive officer",
48
+ "knowledge_type": "entity_role",
49
+ "templates": [
50
+ "In {year}, the CEO of {subject} was {object}.",
51
+ "Who was the CEO of {subject} in {year}?",
52
+ "As of {year}, {subject} was led by CEO {object}.",
53
+ ],
54
+ "subject_type": "Q4830453",
55
+ "example": "Google → CEO"
56
+ },
57
+ {
58
+ "property": "P286",
59
+ "label": "head coach",
60
+ "knowledge_type": "entity_role",
61
+ "templates": [
62
+ "In {year}, the head coach of {subject} was {object}.",
63
+ "Who coached {subject} in {year}?",
64
+ ],
65
+ "subject_type": "Q476028",
66
+ "example": "Real Madrid → Coach"
67
+ },
68
+ ]
69
+
70
+ # ============================================================
71
+ # WIKIDATA SPARQL QUERIES
72
+ # ============================================================
73
+
74
+ SPARQL_POST_CUTOFF_CHANGES = """
75
+ SELECT DISTINCT ?subject ?subjectLabel ?property ?propertyLabel
76
+ ?old_value ?old_valueLabel ?old_start ?old_end
77
+ ?new_value ?new_valueLabel ?new_start
78
+ WHERE {{
79
+ ?subject p:{prop} ?new_statement .
80
+ ?new_statement ps:{prop} ?new_value .
81
+ OPTIONAL {{ ?new_statement pq:P580 ?new_start . }}
82
+ ?subject p:{prop} ?old_statement .
83
+ ?old_statement ps:{prop} ?old_value .
84
+ OPTIONAL {{ ?old_statement pq:P580 ?old_start . }}
85
+ OPTIONAL {{ ?old_statement pq:P582 ?old_end . }}
86
+ FILTER(BOUND(?old_end) && ?old_end >= "{cutoff_date}"^^xsd:dateTime)
87
+ FILTER(?new_value != ?old_value)
88
+ ?subject wdt:P31/wdt:P279* wd:{subject_type} .
89
+ SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en". }}
90
+ }}
91
+ LIMIT {limit}
92
+ """
93
+
94
+ SPARQL_PRE_CUTOFF_CHANGES = """
95
+ SELECT DISTINCT ?subject ?subjectLabel
96
+ ?old_value ?old_valueLabel ?old_start ?old_end
97
+ ?new_value ?new_valueLabel ?new_start ?new_end
98
+ WHERE {{
99
+ ?subject p:{prop} ?new_statement .
100
+ ?new_statement ps:{prop} ?new_value .
101
+ OPTIONAL {{ ?new_statement pq:P580 ?new_start . }}
102
+ OPTIONAL {{ ?new_statement pq:P582 ?new_end . }}
103
+ ?subject p:{prop} ?old_statement .
104
+ ?old_statement ps:{prop} ?old_value .
105
+ OPTIONAL {{ ?old_statement pq:P580 ?old_start . }}
106
+ ?old_statement pq:P582 ?old_end .
107
+ FILTER(?old_end >= "2020-01-01"^^xsd:dateTime && ?old_end < "{cutoff_date}"^^xsd:dateTime)
108
+ FILTER(?new_value != ?old_value)
109
+ ?subject wdt:P31/wdt:P279* wd:{subject_type} .
110
+ SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en". }}
111
+ }}
112
+ LIMIT {limit}
113
+ """
114
+
115
+ SPARQL_STABLE_TEMPORAL = """
116
+ SELECT DISTINCT ?subject ?subjectLabel
117
+ ?value ?valueLabel ?start_time
118
+ WHERE {{
119
+ ?subject p:{prop} ?statement .
120
+ ?statement ps:{prop} ?value .
121
+ OPTIONAL {{ ?statement pq:P580 ?start_time . }}
122
+ FILTER NOT EXISTS {{ ?statement pq:P582 ?end_time . }}
123
+ FILTER(!BOUND(?start_time) || ?start_time < "{cutoff_date}"^^xsd:dateTime)
124
+ ?subject wdt:P31/wdt:P279* wd:{subject_type} .
125
+ SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en". }}
126
+ }}
127
+ LIMIT {limit}
128
+ """
129
+
130
+ # ============================================================
131
+ # STABLE FACTS (Category 1)
132
+ # ============================================================
133
+
134
+ STABLE_FACTS = {
135
+ "geographical": [
136
+ {"query": "The capital of {country} is ___", "pairs": [
137
+ ("France", "Paris"), ("Japan", "Tokyo"), ("Egypt", "Cairo"),
138
+ ("Germany", "Berlin"), ("Brazil", "Brasília"), ("Australia", "Canberra"),
139
+ ("Canada", "Ottawa"), ("Italy", "Rome"), ("South Korea", "Seoul"),
140
+ ("Saudi Arabia", "Riyadh"), ("United Arab Emirates", "Abu Dhabi"),
141
+ ("Morocco", "Rabat"), ("Turkey", "Ankara"), ("India", "New Delhi"),
142
+ ("China", "Beijing"), ("Russia", "Moscow"), ("Mexico", "Mexico City"),
143
+ ("Argentina", "Buenos Aires"), ("South Africa", "Pretoria"),
144
+ ("Indonesia", "Jakarta"), ("Thailand", "Bangkok"), ("Vietnam", "Hanoi"),
145
+ ("Poland", "Warsaw"), ("Sweden", "Stockholm"), ("Norway", "Oslo"),
146
+ ("Spain", "Madrid"), ("Portugal", "Lisbon"), ("Greece", "Athens"),
147
+ ("Ireland", "Dublin"), ("Switzerland", "Bern"),
148
+ ]},
149
+ {"query": "The largest continent by area is ___", "answer": "Asia"},
150
+ {"query": "The longest river in the world is ___", "answer": "the Nile"},
151
+ {"query": "The highest mountain in the world is ___", "answer": "Mount Everest"},
152
+ {"query": "The largest ocean is ___", "answer": "the Pacific Ocean"},
153
+ {"query": "The largest desert in the world is ___", "answer": "the Sahara Desert"},
154
+ {"query": "{country} is located in {continent}", "pairs": [
155
+ ("Japan", "Asia"), ("Brazil", "South America"), ("Nigeria", "Africa"),
156
+ ("France", "Europe"), ("Australia", "Oceania"), ("Egypt", "Africa"),
157
+ ("Canada", "North America"), ("India", "Asia"), ("Argentina", "South America"),
158
+ ]},
159
+ ],
160
+ "scientific": [
161
+ {"query": "Water boils at ___ degrees Celsius at standard pressure", "answer": "100"},
162
+ {"query": "The speed of light is approximately ___ km/s", "answer": "300,000"},
163
+ {"query": "The chemical formula for water is ___", "answer": "H2O"},
164
+ {"query": "The chemical formula for table salt is ___", "answer": "NaCl"},
165
+ {"query": "The atomic number of hydrogen is ___", "answer": "1"},
166
+ {"query": "The atomic number of carbon is ___", "answer": "6"},
167
+ {"query": "The atomic number of oxygen is ___", "answer": "8"},
168
+ {"query": "DNA stands for ___", "answer": "deoxyribonucleic acid"},
169
+ {"query": "The Earth orbits around ___", "answer": "the Sun"},
170
+ {"query": "Photosynthesis converts sunlight into ___ for plants", "answer": "energy"},
171
+ {"query": "The force of gravity on Earth is approximately ___ m/s²", "answer": "9.8"},
172
+ {"query": "Absolute zero is ___ degrees Celsius", "answer": "-273.15"},
173
+ ],
174
+ "mathematical": [
175
+ {"query": "The value of pi to two decimal places is ___", "answer": "3.14"},
176
+ {"query": "The square root of 144 is ___", "answer": "12"},
177
+ {"query": "The square root of 169 is ___", "answer": "13"},
178
+ {"query": "2 to the power of 10 is ___", "answer": "1024"},
179
+ {"query": "The sum of angles in a triangle is ___ degrees", "answer": "180"},
180
+ {"query": "The derivative of x² is ___", "answer": "2x"},
181
+ {"query": "The integral of 2x is ___", "answer": "x²"},
182
+ {"query": "Euler's number e is approximately ___", "answer": "2.718"},
183
+ {"query": "The Pythagorean theorem states that a² + b² = ___", "answer": "c²"},
184
+ {"query": "The factorial of 5 (5!) is ___", "answer": "120"},
185
+ ],
186
+ "historical": [
187
+ {"query": "World War I started in ___", "answer": "1914"},
188
+ {"query": "World War II ended in ___", "answer": "1945"},
189
+ {"query": "The first moon landing was in ___", "answer": "1969"},
190
+ {"query": "The Berlin Wall fell in ___", "answer": "1989"},
191
+ {"query": "The United Nations was founded in ___", "answer": "1945"},
192
+ {"query": "The French Revolution began in ___", "answer": "1789"},
193
+ {"query": "The Titanic sank in ___", "answer": "1912"},
194
+ {"query": "The first iPhone was released in ___", "answer": "2007"},
195
+ {"query": "The Internet was invented in ___", "answer": "1969"},
196
+ {"query": "Islam's Prophet Muhammad was born in ___", "answer": "570 CE"},
197
+ {"query": "The pyramids of Giza were built in ancient ___", "answer": "Egypt"},
198
+ ],
199
+ "cultural_religious": [
200
+ {"query": "The holy book of Islam is ___", "answer": "the Quran"},
201
+ {"query": "The holy city of Islam is ___", "answer": "Mecca"},
202
+ {"query": "The language of the Quran is ___", "answer": "Arabic"},
203
+ {"query": "The five pillars of Islam include Shahada, Salah, Zakat, Sawm, and ___", "answer": "Hajj"},
204
+ {"query": "The author of Romeo and Juliet is ___", "answer": "William Shakespeare"},
205
+ {"query": "The Mona Lisa was painted by ___", "answer": "Leonardo da Vinci"},
206
+ {"query": "The theory of relativity was developed by ___", "answer": "Albert Einstein"},
207
+ {"query": "The inventor of the telephone was ___", "answer": "Alexander Graham Bell"},
208
+ ],
209
+ }
210
+
211
+ # ============================================================
212
+ # KNOWN POST-CUTOFF CHANGES (Category 3)
213
+ # ============================================================
214
+
215
+ VERIFIED_POST_CUTOFF_CHANGES = [
216
+ {
217
+ "entity": "United Kingdom",
218
+ "relation": "head of government",
219
+ "knowledge_type": "entity_role",
220
+ "old_answer": "Rishi Sunak",
221
+ "new_answer": "Keir Starmer",
222
+ "change_date": "2024-07-05",
223
+ "templates": [
224
+ "In {year}, the Prime Minister of the United Kingdom was ___",
225
+ "Who was the Prime Minister of the UK in {year}?",
226
+ "As of {year}, the UK was governed by Prime Minister ___",
227
+ ],
228
+ "pre_cutoff_history": [
229
+ {"year": 2020, "answer": "Boris Johnson"},
230
+ {"year": 2022, "answer": "Rishi Sunak"},
231
+ {"year": 2023, "answer": "Rishi Sunak"},
232
+ ]
233
+ },
234
+ {
235
+ "entity": "United States",
236
+ "relation": "president",
237
+ "knowledge_type": "entity_role",
238
+ "old_answer": "Joe Biden",
239
+ "new_answer": "Donald Trump",
240
+ "change_date": "2025-01-20",
241
+ "templates": [
242
+ "In {year}, the President of the United States was ___",
243
+ "Who was the US President in {year}?",
244
+ "As of {year}, the United States was led by President ___",
245
+ ],
246
+ "pre_cutoff_history": [
247
+ {"year": 2018, "answer": "Donald Trump"},
248
+ {"year": 2020, "answer": "Donald Trump"},
249
+ {"year": 2022, "answer": "Joe Biden"},
250
+ {"year": 2023, "answer": "Joe Biden"},
251
+ ]
252
+ },
253
+ {
254
+ "entity": "Japan",
255
+ "relation": "prime minister",
256
+ "knowledge_type": "entity_role",
257
+ "old_answer": "Fumio Kishida",
258
+ "new_answer": "Shigeru Ishiba",
259
+ "change_date": "2024-10-01",
260
+ "templates": [
261
+ "In {year}, the Prime Minister of Japan was ___",
262
+ "Who was Japan's Prime Minister in {year}?",
263
+ ],
264
+ "pre_cutoff_history": [
265
+ {"year": 2020, "answer": "Yoshihide Suga"},
266
+ {"year": 2022, "answer": "Fumio Kishida"},
267
+ {"year": 2023, "answer": "Fumio Kishida"},
268
+ ]
269
+ },
270
+ {
271
+ "entity": "European Commission",
272
+ "relation": "president",
273
+ "knowledge_type": "entity_role",
274
+ "old_answer": "Ursula von der Leyen",
275
+ "new_answer": "Ursula von der Leyen",
276
+ "change_date": "2024-07-18",
277
+ "note": "Same person re-elected — control for change event without answer change",
278
+ "templates": [
279
+ "In {year}, the President of the European Commission was ___",
280
+ ],
281
+ "pre_cutoff_history": [
282
+ {"year": 2020, "answer": "Ursula von der Leyen"},
283
+ {"year": 2023, "answer": "Ursula von der Leyen"},
284
+ ]
285
+ },
286
+ {
287
+ "entity": "Syria",
288
+ "relation": "political situation",
289
+ "knowledge_type": "event",
290
+ "old_answer": "Bashar al-Assad",
291
+ "new_answer": "Ahmad al-Sharaa (transitional government)",
292
+ "change_date": "2024-12-08",
293
+ "templates": [
294
+ "In {year}, the leader of Syria was ___",
295
+ "Who controlled Syria in {year}?",
296
+ ],
297
+ "pre_cutoff_history": [
298
+ {"year": 2020, "answer": "Bashar al-Assad"},
299
+ {"year": 2023, "answer": "Bashar al-Assad"},
300
+ ]
301
+ },
302
+ {
303
+ "entity": "BRICS",
304
+ "relation": "member countries expansion",
305
+ "knowledge_type": "relational",
306
+ "old_answer": "Brazil, Russia, India, China, South Africa",
307
+ "new_answer": "Brazil, Russia, India, China, South Africa, Iran, Egypt, Ethiopia, UAE, Saudi Arabia",
308
+ "change_date": "2024-01-01",
309
+ "note": "Expansion announced 2023 but effective Jan 2024",
310
+ "templates": [
311
+ "In {year}, the members of BRICS included ___",
312
+ "Which countries were part of BRICS in {year}?",
313
+ ],
314
+ "pre_cutoff_history": [
315
+ {"year": 2020, "answer": "Brazil, Russia, India, China, South Africa"},
316
+ {"year": 2023, "answer": "Brazil, Russia, India, China, South Africa"},
317
+ ]
318
+ },
319
+ {
320
+ "entity": "OpenAI",
321
+ "relation": "board and leadership changes",
322
+ "knowledge_type": "relational",
323
+ "old_answer": "nonprofit-controlled structure",
324
+ "new_answer": "for-profit transition announced",
325
+ "change_date": "2024-12-01",
326
+ "templates": [
327
+ "In {year}, OpenAI's corporate structure was ___",
328
+ "As of {year}, OpenAI was organized as ___",
329
+ ],
330
+ "pre_cutoff_history": [
331
+ {"year": 2022, "answer": "nonprofit-controlled with capped-profit subsidiary"},
332
+ {"year": 2023, "answer": "nonprofit-controlled with capped-profit subsidiary"},
333
+ ]
334
+ },
335
+ {
336
+ "entity": "Twitter/X",
337
+ "relation": "CEO",
338
+ "knowledge_type": "entity_role",
339
+ "old_answer": "Linda Yaccarino",
340
+ "new_answer": "Linda Yaccarino",
341
+ "change_date": None,
342
+ "note": "Control: same type (tech CEO) but did NOT change",
343
+ "templates": [
344
+ "In {year}, the CEO of X (formerly Twitter) was ___",
345
+ ],
346
+ "pre_cutoff_history": [
347
+ {"year": 2022, "answer": "Elon Musk (interim)"},
348
+ {"year": 2023, "answer": "Linda Yaccarino"},
349
+ ]
350
+ },
351
+ ]
352
+
353
+ # ============================================================
354
+ # VERIFIED POST-CUTOFF UNCHANGED (Category 4)
355
+ # ============================================================
356
+
357
+ VERIFIED_UNCHANGED_POST_CUTOFF = [
358
+ {
359
+ "entity": "Saudi Arabia",
360
+ "relation": "head of state",
361
+ "knowledge_type": "entity_role",
362
+ "answer": "King Salman bin Abdulaziz",
363
+ "stable_since": "2015-01-23",
364
+ "templates": [
365
+ "In {year}, the King of Saudi Arabia was ___",
366
+ "Who was the King of Saudi Arabia in {year}?",
367
+ ],
368
+ },
369
+ {
370
+ "entity": "United Arab Emirates",
371
+ "relation": "president",
372
+ "knowledge_type": "entity_role",
373
+ "answer": "Mohamed bin Zayed Al Nahyan",
374
+ "stable_since": "2022-05-14",
375
+ "templates": ["In {year}, the President of the UAE was ___"],
376
+ },
377
+ {
378
+ "entity": "Apple Inc.",
379
+ "relation": "CEO",
380
+ "knowledge_type": "entity_role",
381
+ "answer": "Tim Cook",
382
+ "stable_since": "2011-08-24",
383
+ "templates": ["In {year}, the CEO of Apple was ___", "Who was Apple's CEO in {year}?"],
384
+ },
385
+ {
386
+ "entity": "Microsoft",
387
+ "relation": "CEO",
388
+ "knowledge_type": "entity_role",
389
+ "answer": "Satya Nadella",
390
+ "stable_since": "2014-02-04",
391
+ "templates": ["In {year}, the CEO of Microsoft was ___"],
392
+ },
393
+ {
394
+ "entity": "Amazon",
395
+ "relation": "CEO",
396
+ "knowledge_type": "entity_role",
397
+ "answer": "Andy Jassy",
398
+ "stable_since": "2021-07-05",
399
+ "templates": ["In {year}, the CEO of Amazon was ___"],
400
+ },
401
+ {
402
+ "entity": "Russia",
403
+ "relation": "president",
404
+ "knowledge_type": "entity_role",
405
+ "answer": "Vladimir Putin",
406
+ "stable_since": "2012-05-07",
407
+ "templates": ["In {year}, the President of Russia was ___"],
408
+ },
409
+ {
410
+ "entity": "China",
411
+ "relation": "president",
412
+ "knowledge_type": "entity_role",
413
+ "answer": "Xi Jinping",
414
+ "stable_since": "2013-03-14",
415
+ "templates": ["In {year}, the President of China was ___"],
416
+ },
417
+ {
418
+ "entity": "France",
419
+ "relation": "president",
420
+ "knowledge_type": "entity_role",
421
+ "answer": "Emmanuel Macron",
422
+ "stable_since": "2017-05-14",
423
+ "templates": ["In {year}, the President of France was ___"],
424
+ },
425
+ {
426
+ "entity": "Germany",
427
+ "relation": "chancellor",
428
+ "knowledge_type": "entity_role",
429
+ "answer": "Olaf Scholz",
430
+ "stable_since": "2021-12-08",
431
+ "templates": ["In {year}, the Chancellor of Germany was ___"],
432
+ },
433
+ {
434
+ "entity": "Egypt",
435
+ "relation": "president",
436
+ "knowledge_type": "entity_role",
437
+ "answer": "Abdel Fattah el-Sisi",
438
+ "stable_since": "2014-06-08",
439
+ "templates": ["In {year}, the President of Egypt was ___"],
440
+ },
441
+ {
442
+ "entity": "Tesla",
443
+ "relation": "CEO",
444
+ "knowledge_type": "entity_role",
445
+ "answer": "Elon Musk",
446
+ "stable_since": "2008-10-01",
447
+ "templates": ["In {year}, the CEO of Tesla was ___"],
448
+ },
449
+ {
450
+ "entity": "Google/Alphabet",
451
+ "relation": "CEO",
452
+ "knowledge_type": "entity_role",
453
+ "answer": "Sundar Pichai",
454
+ "stable_since": "2015-10-02",
455
+ "templates": ["In {year}, the CEO of Google was ___"],
456
+ },
457
+ ]
458
+
459
+ # ============================================================
460
+ # ARABIC KNOWLEDGE (Category 5)
461
+ # ============================================================
462
+
463
+ ARABIC_KNOWLEDGE = {
464
+ "entity_role_arabic": [
465
+ {
466
+ "entity": "المملكة العربية السعودية",
467
+ "entity_en": "Saudi Arabia",
468
+ "relation": "ولي العهد",
469
+ "relation_en": "Crown Prince",
470
+ "answer": "محمد بن سلمان",
471
+ "answer_en": "Mohammed bin Salman",
472
+ "stable": True,
473
+ "templates_ar": ["في عام {year}، ولي عهد المملكة العربية السعودية هو ___"],
474
+ "templates_en": ["In {year}, the Crown Prince of Saudi Arabia was ___"],
475
+ },
476
+ {
477
+ "entity": "سوريا",
478
+ "entity_en": "Syria",
479
+ "relation": "رئيس",
480
+ "relation_en": "leader",
481
+ "old_answer_ar": "بشار الأسد",
482
+ "new_answer_ar": "أحمد الشرع",
483
+ "old_answer_en": "Bashar al-Assad",
484
+ "new_answer_en": "Ahmad al-Sharaa",
485
+ "stable": False,
486
+ "change_date": "2024-12-08",
487
+ "templates_ar": [
488
+ "في عام {year}، كان رئيس سوريا هو ___",
489
+ "من كان يحكم سوريا في عام {year}؟",
490
+ ],
491
+ "templates_en": [
492
+ "In {year}, the president of Syria was ___",
493
+ "Who ruled Syria in {year}?",
494
+ ],
495
+ },
496
+ {
497
+ "entity": "المملكة المتحدة",
498
+ "entity_en": "United Kingdom",
499
+ "relation": "رئيس الوزراء",
500
+ "relation_en": "Prime Minister",
501
+ "old_answer_ar": "ريشي سوناك",
502
+ "new_answer_ar": "كير ستارمر",
503
+ "old_answer_en": "Rishi Sunak",
504
+ "new_answer_en": "Keir Starmer",
505
+ "stable": False,
506
+ "change_date": "2024-07-05",
507
+ "templates_ar": ["في عام {year}، رئيس وزراء المملكة المتحدة هو ___"],
508
+ "templates_en": ["In {year}, the Prime Minister of the UK was ___"],
509
+ },
510
+ ],
511
+ "islamic_finance": [
512
+ {
513
+ "entity": "هيئة المحاسبة والمراجعة للمؤسسات المالية الإسلامية",
514
+ "entity_en": "AAOIFI",
515
+ "relation": "معايير الصكوك",
516
+ "relation_en": "sukuk standards",
517
+ "templates_ar": ["معايير هيئة AAOIFI للصكوك الإسلامية تتطلب ___"],
518
+ "templates_en": ["AAOIFI standards for Islamic sukuk require ___"],
519
+ "knowledge_type": "procedural",
520
+ "note": "Connect to SAHM project",
521
+ },
522
+ ],
523
+ }
524
+
525
+ # ============================================================
526
+ # WIKIDATA ENDPOINT
527
+ # ============================================================
528
+ WIKIDATA_ENDPOINT = "https://query.wikidata.org/sparql"
529
+ WIKIDATA_USER_AGENT = "KnowledgeDriftResearch/1.0 (academic research)"
530
+ RESULTS_PER_QUERY = 200
convert_sparql_to_samples.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from collections import Counter
3
+
4
+ print('Loading SPARQL updates...')
5
+ with open('data/external/templama_2023_2026_updates_clean.json') as f:
6
+ updates = json.load(f)['updates']
7
+ print(f' {len(updates)} changed entities')
8
+
9
+ print('Loading Dynamic-TempLAMA templates...')
10
+ dyn = {}
11
+ with open('data/external/temporal-robustness/data/dynamic-templama/dataset_from_2019-1-1_to_2022-12-31_per_quarter/test.jsonl') as f:
12
+ for line in f:
13
+ if not line.strip(): continue
14
+ d = json.loads(line)
15
+ eid = d['id'].split('_')[0]
16
+ key = eid + '_' + d['relation']
17
+ if key not in dyn:
18
+ dyn[key] = {'template': d['query'], 'answers': {}}
19
+ ans = d['answer']
20
+ if isinstance(ans, dict): name = ans.get('name', '')
21
+ elif isinstance(ans, list) and ans: name = ans[0].get('name', '')
22
+ else: name = str(ans)
23
+ dyn[key]['answers'][d['date']] = name
24
+ print(f' {len(dyn)} entity-relation pairs')
25
+
26
+ YEARS = [2023, 2024, 2025, 2026]
27
+ CUTOFFS = {'llama2': '2022-09-01', 'mistral': '2023-12-01', 'llama31': '2023-12-01', 'qwen25': '2023-12-31', 'gemma2': '2024-06-01'}
28
+
29
+ print('Generating samples...')
30
+ samples = []
31
+ sid = 0
32
+ skipped = 0
33
+
34
+ for uid, u in updates.items():
35
+ dd = u.get('drift_date', '')[:10]
36
+ try: dy = int(dd[:4])
37
+ except: skipped += 1; continue
38
+ for year in YEARS:
39
+ if year >= dy: exp, drifted, model_ans = u['new_answer'], True, u['old_answer']
40
+ else: exp, drifted, model_ans = u['old_answer'], False, ''
41
+ tz = 'pre_cutoff' if year < 2023 else ('near_cutoff' if year == 2023 else 'post_cutoff')
42
+ query = 'In ' + str(year) + ', ' + u['query_template'].replace('_X_', '___')
43
+ s = {'sample_id': 'sparql_' + str(sid).zfill(6), 'query': query, 'expected_answer': exp, 'year': year, 'temporal_zone': tz, 'is_drifted_query': drifted, 'model_likely_answer': model_ans, 'language': 'en', 'entity': u['entity_id'], 'relation': u['relation_name'], 'knowledge_type': 'entity_role', 'category': 'unknown_drift' if drifted else 'known_drift', 'source': 'sparql_extension', 'parent_id': u['entity_id'], 'drift_date': u.get('drift_date', '')}
44
+ for m, c in CUTOFFS.items(): s['is_drifted_' + m] = (dd > c) and (year >= dy)
45
+ samples.append(s)
46
+ sid += 1
47
+
48
+ with open('data/external/sparql_extension_samples.json', 'w') as f:
49
+ json.dump({'metadata': {'total': len(samples), 'skipped': skipped}, 'samples': samples}, f, indent=2, ensure_ascii=False)
50
+
51
+ print(f'Total: {len(samples)} samples, Skipped: {skipped}')
52
+ for label, fn in [('Categories', 'category'), ('Relations', 'relation')]:
53
+ print(f'{label}:')
54
+ for k, n in Counter(s[fn] for s in samples).most_common(): print(f' {k}: {n}')
55
+ print('Years:')
56
+ for y, n in sorted(Counter(s['year'] for s in samples).items()): print(f' {y}: {n}')
57
+ print('Per-model unknown_drift:')
58
+ for m in CUTOFFS: print(f' {m}: {sum(1 for s in samples if s.get("is_drifted_" + m, False))}')
59
+ print(f'Differential: {sum(1 for s in samples if len(set(s.get("is_drifted_" + m, False) for m in CUTOFFS)) > 1)}')
60
+ print('Saved to: data/external/sparql_extension_samples.json')
cross_model.py ADDED
@@ -0,0 +1,856 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ cross_model.py — Cross-Model Drift Analysis
4
+ =============================================
5
+ Runs AFTER analyze_single.py on 2+ models.
6
+ Uses probe bundles + caches to compare drift representations across architectures.
7
+
8
+ 6 Experiments:
9
+ [CM-1] Full-layer CKA matrix (L_A × L_B per pair, not just best layer)
10
+ [CM-2] Drift score correlation (probe A scores vs probe B scores on shared queries)
11
+ [CM-3] Differential facts (queries drifted for A but stable for B)
12
+ [CM-4] Layer correspondence (best layer as % of depth — universal localization?)
13
+ [CM-5] Neuron overlap (same-dim models only: which neuron indices carry drift?)
14
+ [CM-6] Universality score (aggregate metric for paper abstract)
15
+
16
+ Outputs:
17
+ cross_model_results.json Complete results
18
+ figures/fig_cm1_cka.png Layer-wise CKA heatmaps
19
+ figures/fig_cm2_corr.png Score correlation matrix
20
+ figures/fig_cm3_diff.png Differential facts scatter
21
+ figures/fig_cm4_layers.png Layer correspondence bar
22
+ figures/fig_cm5_neurons.png Neuron overlap (same-dim pairs)
23
+ figures/fig_cm6_summary.png Universality summary
24
+
25
+ Usage:
26
+ # Compare two models
27
+ python cross_model.py --models qwen25 llama31
28
+
29
+ # All available models
30
+ python cross_model.py --all
31
+
32
+ # Quick mode (skip full-layer CKA, just best-layer)
33
+ python cross_model.py --all --quick
34
+ """
35
+
36
+ import argparse
37
+ import json
38
+ import logging
39
+ import time
40
+ import warnings
41
+ from pathlib import Path
42
+
43
+ import numpy as np
44
+ import yaml
45
+
46
+ warnings.filterwarnings("ignore")
47
+ logging.basicConfig(
48
+ level=logging.INFO,
49
+ format="%(asctime)s [%(levelname)s] %(message)s",
50
+ handlers=[logging.StreamHandler()])
51
+ logger = logging.getLogger(__name__)
52
+
53
+
54
+ # ─────────────────────────────────────────────────────────────────────────────
55
+ # CONFIG + DATA LOADING
56
+ # ─────────────────────────────────────────────────────────────────────────────
57
+
58
+ def load_config(path="models.yaml"):
59
+ with open(path) as f:
60
+ return yaml.safe_load(f)
61
+
62
+
63
+ def load_cache(model_dir, model_key):
64
+ path = Path(model_dir) / model_key / f"cached_{model_key}.npz"
65
+ if not path.exists():
66
+ logger.error(f"Cache not found: {path}")
67
+ return None
68
+ results = np.load(str(path), allow_pickle=True)["results"].tolist()
69
+ logger.info(f" [{model_key}] Loaded {len(results)} samples")
70
+ return results
71
+
72
+
73
+ def load_probe_bundle(model_dir, model_key):
74
+ path = Path(model_dir) / model_key / f"probe_bundle_{model_key}.npz"
75
+ if not path.exists():
76
+ logger.warning(f"Probe bundle not found: {path}")
77
+ return None
78
+ d = np.load(str(path), allow_pickle=True)
79
+ bundle = {k: d[k] for k in d.files}
80
+ # Convert scalar items
81
+ for k in ["best_layer", "hidden_dim", "n_samples"]:
82
+ if k in bundle:
83
+ bundle[k] = int(bundle[k])
84
+ for k in ["drift_auroc", "cos_du", "cos_dc"]:
85
+ if k in bundle:
86
+ bundle[k] = float(bundle[k])
87
+ logger.info(f" [{model_key}] Bundle: layer={bundle.get('best_layer')}, "
88
+ f"dim={bundle.get('hidden_dim')}, "
89
+ f"AUROC={bundle.get('drift_auroc', 0):.4f}")
90
+ return bundle
91
+
92
+
93
+ def load_final_results(model_dir, model_key):
94
+ path = Path(model_dir) / model_key / "final_results.json"
95
+ if not path.exists():
96
+ return None
97
+ with open(path) as f:
98
+ return json.load(f)
99
+
100
+
101
+ # ─────────────────────────────────────────────────────────────────────────────
102
+ # PROBE FITTING (lightweight — for scoring shared queries)
103
+ # ─────────────────────────────────────────────────────────────────────────────
104
+
105
+ def soft_threshold(w, lam):
106
+ import torch
107
+ return torch.sign(w) * torch.clamp(torch.abs(w) - lam, min=0.0)
108
+
109
+
110
def fit_quick_probe(X_np, y_np, device="cuda:0", lam=1e-3, max_iter=500):
    """Fast L1-regularised logistic probe for cross-model scoring.

    Fits via ISTA (proximal gradient descent) with a backtracking step
    size on standardised features.

    Args:
        X_np: (n, d) feature matrix (numpy).
        y_np: (n,) binary labels in {0, 1} (numpy).
        device: torch device for the optimisation.
        lam: L1 penalty strength.
        max_iter: maximum proximal-gradient iterations (including
            rejected backtracking steps).

    Returns:
        (score, w): ``score(X_new)`` maps raw features to probabilities
        in [0, 1] using the training mean/std; ``w`` is the learned
        weight vector as a numpy array.
    """
    import torch
    # Sanitise and standardise features; (m, s) are reused at scoring time.
    X = np.nan_to_num(X_np.astype(np.float32), nan=0., posinf=1e4, neginf=-1e4)
    X = np.clip(X, -1e4, 1e4)
    m = X.mean(0, keepdims=True)
    s = X.std(0, keepdims=True) + 1e-8
    Xt = torch.tensor((X - m) / s, dtype=torch.float32, device=device)
    yt = torch.tensor(y_np.astype(np.float32), device=device)

    def _nll(weights, bias):
        # Clamped-logit binary cross-entropy (mean reduction); returns the
        # probabilities too, so the accepted forward pass is not recomputed.
        z = torch.clamp(Xt @ weights + bias, -30, 30)
        p = torch.sigmoid(z)
        nll = -((yt * torch.log(p + 1e-12)) +
                (1 - yt) * torch.log(1 - p + 1e-12)).mean()
        return nll, p

    w = torch.zeros(Xt.shape[1], device=device)
    b = torch.zeros(1, device=device)
    lr = 1.0
    for _ in range(max_iter):
        L, p = _nll(w, b)
        e = p - yt
        gw = (Xt.T @ e) / len(yt)
        gb = e.mean().reshape(1)  # explicit (1,) shape; robust across torch versions
        # Proximal (soft-threshold) step for the L1 penalty.
        step = w - lr * gw
        wt = torch.sign(step) * torch.clamp(torch.abs(step) - lr * lam, min=0.0)
        bt = b - lr * gb
        Lt, _ = _nll(wt, bt)
        if Lt > L + 1e-4:
            # Backtrack: REJECT the uphill step and retry with a smaller lr.
            # (The original accepted the worse iterate, defeating the search.)
            lr *= 0.5
            continue
        lr = min(lr * 1.05, 10.0)
        if (wt - w).abs().max().item() < 1e-6:
            w, b = wt, bt
            break
        w, b = wt, bt

    def score(X_new):
        """Probability scores for raw (unstandardised) features."""
        Xn = np.nan_to_num(X_new.astype(np.float32), nan=0., posinf=1e4, neginf=-1e4)
        Xn = np.clip(Xn, -1e4, 1e4)
        Xn = torch.tensor((Xn - m) / s, dtype=torch.float32, device=device)
        with torch.no_grad():
            return torch.sigmoid(torch.clamp(Xn @ w + b, -30, 30)).cpu().numpy()

    return score, w.cpu().numpy()
152
+
153
+
154
+ # ─────────────────────────────────────────────────────────────────────────────
155
+ # [CM-1] CKA ANALYSIS
156
+ # ─────────────────────────────────────────────────────────────────────────────
157
+
158
def linear_cka(Xa, Xb):
    """Linear Centered Kernel Alignment between two representation matrices.

    CKA(Ka, Kb) = <Ka, Kb>_F / (||Ka||_F * ||Kb||_F) on centered Gram
    matrices, so identical (or orthogonally transformed / isotropically
    scaled) representations score exactly 1.

    Args:
        Xa: (n, d_a) representation matrix.
        Xb: (n, d_b) representation matrix over the same n examples.

    Returns:
        CKA similarity in [0, 1].
    """
    def _center(K):
        n = K.shape[0]
        H = np.eye(n) - 1.0 / n
        return H @ K @ H
    Ka = _center(Xa @ Xa.T)
    Kb = _center(Xb @ Xb.T)
    # The HSIC numerator is the Frobenius *inner product* <Ka, Kb>, not
    # ||Ka.T @ Kb||_F: the latter yields CKA(X, X) < 1 whenever rank > 1.
    num = float((Ka * Kb).sum())
    den = np.linalg.norm(Ka, "fro") * np.linalg.norm(Kb, "fro")
    return float(num / (den + 1e-12))
169
+
170
+
171
def cka_analysis(res_a, res_b, key_a, key_b, quick=False):
    """
    [CM-1] CKA between two models.
    If quick=False: full L_A × L_B heatmap.
    If quick=True: just best-layer CKA.
    """
    logger.info(f"[CM-1] CKA: {key_a} vs {key_b}")

    # Index both result lists by query text, then intersect.
    lookup_a = {r["query"]: r for r in res_a}
    lookup_b = {r["query"]: r for r in res_b}
    shared = sorted(set(lookup_a) & set(lookup_b))
    logger.info(f" Shared queries: {len(shared)}")

    if len(shared) < 50:
        logger.warning(" Too few shared queries for CKA")
        return None

    # CKA is O(n²) in examples — cap the sample at 2000 (fixed seed).
    if len(shared) > 2000:
        np.random.seed(42)
        shared = list(np.random.choice(shared, 2000, replace=False))

    layers_a = sorted(res_a[0]["hidden_states"].keys())
    layers_b = sorted(res_b[0]["hidden_states"].keys())

    if quick:
        # Only the last five layers per model.
        best_a = layers_a[-5:]
        best_b = layers_b[-5:]
    else:
        # At most ~10 evenly spaced layers per model for tractability.
        best_a = layers_a[::max(1, len(layers_a) // 10)]
        best_b = layers_b[::max(1, len(layers_b) // 10)]

    cka_mat = np.zeros((len(best_a), len(best_b)))
    for row, la in enumerate(best_a):
        Xa = np.array([lookup_a[q]["hidden_states"][la] for q in shared])
        for col, lb in enumerate(best_b):
            Xb = np.array([lookup_b[q]["hidden_states"][lb] for q in shared])
            cka_mat[row, col] = linear_cka(Xa, Xb)
        if (row + 1) % 3 == 0:
            logger.info(f" CKA row {row+1}/{len(best_a)}")

    best_cka = float(cka_mat.max())
    logger.info(f" Best CKA: {best_cka:.4f}")

    return {
        "layers_a": best_a, "layers_b": best_b,
        "cka_matrix": cka_mat.tolist(),
        "best_cka": best_cka,
        "n_shared": len(shared),
    }
226
+
227
+
228
+ # ─────────────────────────────────────────────────────────────────────────────
229
+ # [CM-2] SCORE CORRELATION
230
+ # ─────────────────────────────────────────────────────────────────────────────
231
+
232
def score_correlation(res_a, res_b, key_a, key_b, bundle_a, bundle_b, device):
    """
    [CM-2] Train probe on each model, score shared queries, correlate.
    """
    from sklearn.metrics import roc_auc_score
    logger.info(f"[CM-2] Score correlation: {key_a} vs {key_b}")

    lookup_a = {r["query"]: r for r in res_a}
    lookup_b = {r["query"]: r for r in res_b}
    shared = sorted(set(lookup_a) & set(lookup_b))
    logger.info(f" Shared: {len(shared)}")

    if len(shared) < 50:
        return None

    layer_a = int(bundle_a["best_layer"])
    layer_b = int(bundle_b["best_layer"])

    # Fit one lightweight probe per model on its full cache.
    feats_a = np.array([r["hidden_states"][layer_a] for r in res_a])
    labels_a = np.array([int(r["is_drifted"]) for r in res_a])
    feats_b = np.array([r["hidden_states"][layer_b] for r in res_b])
    labels_b = np.array([int(r["is_drifted"]) for r in res_b])

    score_a, _ = fit_quick_probe(feats_a, labels_a, device)
    score_b, _ = fit_quick_probe(feats_b, labels_b, device)

    # Score the shared-query subset with each model's own probe.
    sa = score_a(np.array([lookup_a[q]["hidden_states"][layer_a] for q in shared]))
    sb = score_b(np.array([lookup_b[q]["hidden_states"][layer_b] for q in shared]))

    # Per-model labels on the shared subset.
    ya_shared = np.array([int(lookup_a[q]["is_drifted"]) for q in shared])
    yb_shared = np.array([int(lookup_b[q]["is_drifted"]) for q in shared])

    corr = float(np.corrcoef(sa, sb)[0, 1])
    # roc_auc_score raises when a label class is missing — fall back to chance.
    try:
        auroc_a = roc_auc_score(ya_shared, sa)
        auroc_b = roc_auc_score(yb_shared, sb)
    except Exception:
        auroc_a = auroc_b = 0.5

    logger.info(f" Score corr: {corr:.4f} "
                f"AUROC_a={auroc_a:.4f} AUROC_b={auroc_b:.4f}")

    return {
        "correlation": corr,
        "auroc_a_on_shared": auroc_a,
        "auroc_b_on_shared": auroc_b,
        "n_shared": len(shared),
        "scores_a": sa.tolist(),
        "scores_b": sb.tolist(),
    }
287
+
288
+
289
+ # ─────────────────────────────────────────────────────────────────────────────
290
+ # [CM-3] DIFFERENTIAL FACTS
291
+ # ─────────────────────────────────────────────────────────────────────────────
292
+
293
def differential_facts(res_a, res_b, key_a, key_b, bundle_a, bundle_b, device):
    """
    [CM-3] Queries where is_drifted differs between models.
    Each probe should detect its own model's drift correctly.

    Args:
        res_a, res_b: per-query result dicts (need "query",
            "hidden_states", "is_drifted") for models A and B.
        key_a, key_b: model identifiers (logging only).
        bundle_a, bundle_b: probe bundles providing "best_layer".
        device: torch device forwarded to fit_quick_probe.

    Returns:
        Dict of differential-fact metrics (counts, per-probe AUROC on the
        differential subset, score correlation, raw scores/labels), or
        None when fewer than 20 queries disagree on is_drifted.
    """
    from sklearn.metrics import roc_auc_score
    logger.info(f"[CM-3] Differential facts: {key_a} vs {key_b}")

    # Index by query text and intersect the two query sets.
    qa = {r["query"]: r for r in res_a}
    qb = {r["query"]: r for r in res_b}
    shared = sorted(set(qa) & set(qb))

    # Find differential: drifted for A but not B, or vice versa
    diff_queries = [q for q in shared
                    if qa[q]["is_drifted"] != qb[q]["is_drifted"]]
    logger.info(f" Shared={len(shared)}, Differential={len(diff_queries)}")

    if len(diff_queries) < 20:
        logger.warning(" Too few differential facts")
        return None

    # Each model's own best drift layer.
    bl_a = int(bundle_a["best_layer"])
    bl_b = int(bundle_b["best_layer"])

    # Train probes (one per model, on that model's full cache).
    X_a = np.array([r["hidden_states"][bl_a] for r in res_a])
    y_a = np.array([int(r["is_drifted"]) for r in res_a])
    X_b = np.array([r["hidden_states"][bl_b] for r in res_b])
    y_b = np.array([int(r["is_drifted"]) for r in res_b])
    score_a, _ = fit_quick_probe(X_a, y_a, device)
    score_b, _ = fit_quick_probe(X_b, y_b, device)

    # Score differential queries with each model's own probe.
    Xa_d = np.array([qa[q]["hidden_states"][bl_a] for q in diff_queries])
    Xb_d = np.array([qb[q]["hidden_states"][bl_b] for q in diff_queries])
    sa = score_a(Xa_d)
    sb = score_b(Xb_d)
    la = np.array([int(qa[q]["is_drifted"]) for q in diff_queries])
    lb = np.array([int(qb[q]["is_drifted"]) for q in diff_queries])

    # AUROC per probe on the differential subset; roc_auc_score raises
    # when a class is absent, in which case fall back to chance (0.5).
    try:
        auroc_a = roc_auc_score(la, sa)
    except Exception:
        auroc_a = 0.5
    try:
        auroc_b = roc_auc_score(lb, sb)
    except Exception:
        auroc_b = 0.5

    # Anti-correlation: when A says drifted and B says stable,
    # score_a should be high and score_b should be low
    score_corr = float(np.corrcoef(sa, sb)[0, 1])

    # Count categories
    a_only = sum(1 for q in diff_queries
                 if qa[q]["is_drifted"] and not qb[q]["is_drifted"])
    b_only = sum(1 for q in diff_queries
                 if not qa[q]["is_drifted"] and qb[q]["is_drifted"])

    logger.info(f" AUROC_a={auroc_a:.4f} AUROC_b={auroc_b:.4f} "
                f"score_corr={score_corr:.4f}")
    logger.info(f" A-only drifted: {a_only} B-only drifted: {b_only}")

    return {
        "n_differential": len(diff_queries),
        "n_shared": len(shared),
        "a_only_drifted": a_only,
        "b_only_drifted": b_only,
        "auroc_a": auroc_a,
        "auroc_b": auroc_b,
        "score_correlation": score_corr,
        "scores_a": sa.tolist(),
        "scores_b": sb.tolist(),
        "labels_a": la.tolist(),
        "labels_b": lb.tolist(),
    }
369
+
370
+
371
+ # ─────────────────────────────────────────────────────────────────────────────
372
+ # [CM-4] LAYER CORRESPONDENCE
373
+ # ─────────────────────────────────────────────────────────────────────────────
374
+
375
def layer_correspondence(all_bundles, all_final):
    """
    [CM-4] Best drift layer as fraction of total depth.
    If all models peak at ~80%, drift localization is universal.

    Args:
        all_bundles: model_key -> probe bundle (needs "best_layer",
            optionally "drift_auroc").
        all_final: model_key -> final_results dict; used to recover the
            model's total layer count when available.

    Returns:
        Dict with per-model layer stats plus mean/std of the depth
        fraction across models.
    """
    logger.info("[CM-4] Layer correspondence")
    data = {}
    for key in all_bundles:
        bl = int(all_bundles[key]["best_layer"])
        # Recover total depth from final results; fall back to best_layer+1.
        # (The original also read "hidden_dim" into an unused local — removed:
        # hidden_dim is a width, not a layer count.)
        fr = all_final.get(key, {})
        n_layers = fr.get("best_layer_results", {}).get("layer", bl) + 1
        # The probe-stability layer list is the most reliable depth estimate.
        stab = fr.get("probe_stability", {})
        if "layers" in stab and len(stab["layers"]) > 0:
            n_layers = max(stab["layers"]) + 1

        frac = bl / max(n_layers, 1)
        auroc = float(all_bundles[key].get("drift_auroc", 0))
        data[key] = {
            "best_layer": bl,
            "n_layers": n_layers,
            "fraction": frac,
            "auroc": auroc,
        }
        logger.info(f" {key}: L{bl}/{n_layers} = {frac:.1%} "
                    f"AUROC={auroc:.4f}")

    fracs = [v["fraction"] for v in data.values()]
    mean_frac = float(np.mean(fracs))
    std_frac = float(np.std(fracs))
    logger.info(f" Mean fraction: {mean_frac:.1%} +/- {std_frac:.1%}")

    return {
        "per_model": data,
        "mean_fraction": mean_frac,
        "std_fraction": std_frac,
    }
414
+
415
+
416
+ # ─────────────────────────────────────────────────────────────────────────────
417
+ # [CM-5] NEURON OVERLAP (same-dim models only)
418
+ # ─────────────────────────────────────────────────────────────────────────────
419
+
420
def neuron_overlap(bundle_a, bundle_b, key_a, key_b):
    """
    [CM-5] For same-dimension models: do the same neuron indices carry drift?

    Args:
        bundle_a, bundle_b: probe bundles with "hidden_dim" and "w_drift"
            (weight vector; may be a plain list if loaded from JSON).
        key_a, key_b: model identifiers (logging only).

    Returns:
        Dict with active-set Jaccard, weight cosine, and top-100 overlap,
        or None when the hidden dimensions differ.
    """
    dim_a = int(bundle_a["hidden_dim"])
    dim_b = int(bundle_b["hidden_dim"])

    if dim_a != dim_b:
        logger.info(f"[CM-5] {key_a}({dim_a}) vs {key_b}({dim_b}): "
                    f"dim mismatch, skipping")
        return None

    logger.info(f"[CM-5] Neuron overlap: {key_a} vs {key_b} (dim={dim_a})")

    # Coerce to arrays: JSON-loaded bundles store weights as plain lists,
    # and `list != 0` / np.where would silently misbehave on them.
    w_a = np.asarray(bundle_a["w_drift"])
    w_b = np.asarray(bundle_b["w_drift"])

    # Active set = neurons with non-zero L1-probe weight.
    active_a = set(np.where(w_a != 0)[0])
    active_b = set(np.where(w_b != 0)[0])

    inter = len(active_a & active_b)
    union = len(active_a | active_b)
    jacc = inter / union if union > 0 else 0.0

    # Cosine of weight vectors (even though from different models)
    cos = float(np.dot(w_a, w_b) / (np.linalg.norm(w_a) * np.linalg.norm(w_b) + 1e-12))

    # Top-k overlap by |weight| (argsort takes the full vector when dim < 100).
    top100_a = set(np.argsort(np.abs(w_a))[-100:])
    top100_b = set(np.argsort(np.abs(w_b))[-100:])
    top100_overlap = len(top100_a & top100_b) / 100.0

    logger.info(f" Active: A={len(active_a)}, B={len(active_b)}")
    logger.info(f" Jaccard: {jacc:.4f} Cosine: {cos:.4f} "
                f"Top-100 overlap: {top100_overlap:.2%}")

    return {
        "dim": dim_a,
        "n_active_a": len(active_a),
        "n_active_b": len(active_b),
        "intersection": inter,
        "union": union,
        "jaccard": jacc,
        "cosine": cos,
        "top100_overlap": top100_overlap,
    }
466
+
467
+
468
+ # ─────────────────────────────────────────────────────────────────────────────
469
+ # [CM-6] UNIVERSALITY SCORE
470
+ # ─────────────────────────────────────────────────────────────────────────────
471
+
472
def universality_score(all_cka, all_corr, all_diff, all_layer_corr,
                       n_bootstrap=1000):
    """
    [CM-6] Aggregate metric: geometric mean of CKA, score correlation,
    differential AUROC, and layer consistency.
    """
    logger.info("[CM-6] Universality score")

    components = {}

    # Mean best CKA across available pairs.
    cka_vals = [v["best_cka"] for v in all_cka.values() if v]
    if cka_vals:
        components["mean_cka"] = float(np.mean(cka_vals))

    # Mean pairwise score correlation.
    corr_vals = [v["correlation"] for v in all_corr.values() if v]
    if corr_vals:
        components["mean_score_corr"] = float(np.mean(corr_vals))

    # Mean differential AUROC (both probes of every pair).
    diff_aurocs = [a for v in all_diff.values() if v
                   for a in (v["auroc_a"], v["auroc_b"])]
    if diff_aurocs:
        components["mean_diff_auroc"] = float(np.mean(diff_aurocs))

    # Layer consistency = 1 - std of depth fractions.
    if all_layer_corr:
        components["layer_consistency"] = float(
            1.0 - all_layer_corr.get("std_fraction", 0.5))

    if not components:
        return None

    def _geo_mean(values):
        # Geometric mean with a 1e-6 floor to keep log() finite.
        return float(np.exp(np.mean(np.log(np.clip(values, 1e-6, None)))))

    vals = list(components.values())
    geo_mean = _geo_mean(vals)

    # Bootstrap CI over the component values.
    arr = np.array(vals)
    boot = [_geo_mean(arr[np.random.choice(len(vals), len(vals), replace=True)])
            for _ in range(n_bootstrap)]
    ci_lo = float(np.percentile(boot, 2.5))
    ci_hi = float(np.percentile(boot, 97.5))

    logger.info(f" Components: {components}")
    logger.info(f" Universality: {geo_mean:.4f} [{ci_lo:.4f}, {ci_hi:.4f}]")

    return {
        "components": components,
        "universality_score": geo_mean,
        "ci_95": [ci_lo, ci_hi],
    }
528
+
529
+
530
+ # ─────────────────────────────────────────────────────────────────────────────
531
+ # FIGURES
532
+ # ─────────────────────────────────────────────────────────────────────────────
533
+
534
def save_cross_figures(out_dir, keys, all_cka, all_corr, all_diff,
                       layer_data, neuron_data, univ_data):
    """Render cross-model figures (CM-1..CM-4, CM-6) into <out_dir>/figures.

    Args:
        out_dir: directory under which "figures/" is created.
        keys: ordered list of model keys (axis labels for CM-2).
        all_cka: pair_key -> cka_analysis() output (or None).
        all_corr: pair_key -> score_correlation() output (or None).
        all_diff: pair_key -> differential_facts() output (or None).
        layer_data: layer_correspondence() output.
        neuron_data: neuron_overlap() outputs — accepted but not plotted
            by any panel below.
        univ_data: universality_score() output.
    """
    import matplotlib
    matplotlib.use("Agg")  # headless backend: no display required
    import matplotlib.pyplot as plt

    fig_dir = Path(out_dir) / "figures"
    fig_dir.mkdir(parents=True, exist_ok=True)

    # Shared colour palette for all panels.
    P = {"drift": "#e74c3c", "unc": "#3498db", "corr": "#2ecc71",
         "null": "#9b59b6", "neu": "#e67e22"}

    # ── CM-1: CKA heatmaps ───────────────────────────────────────────────
    cka_pairs = [(k, v) for k, v in all_cka.items() if v]
    if cka_pairs:
        n_pairs = len(cka_pairs)
        fig, axes = plt.subplots(1, n_pairs, figsize=(8 * n_pairs, 7))
        if n_pairs == 1:
            axes = [axes]  # subplots() returns a bare Axes when n == 1
        fig.suptitle("[CM-1] Cross-Model CKA", fontsize=16, fontweight="bold")
        for ax, (pair_key, data) in zip(axes, cka_pairs):
            mat = np.array(data["cka_matrix"])
            im = ax.imshow(mat, cmap="viridis", vmin=0, vmax=1, aspect="auto")
            la = data["layers_a"]
            lb = data["layers_b"]
            # Thin tick labels to at most ~6 per axis.
            step_a = max(1, len(la) // 6)
            step_b = max(1, len(lb) // 6)
            ax.set_xticks(range(0, len(lb), step_b))
            ax.set_yticks(range(0, len(la), step_a))
            ax.set_xticklabels([lb[i] for i in range(0, len(lb), step_b)])
            ax.set_yticklabels([la[i] for i in range(0, len(la), step_a)])
            parts = pair_key.split("_vs_")
            ax.set(xlabel=f"{parts[1]} layer", ylabel=f"{parts[0]} layer",
                   title=f"{pair_key}\nbest={data['best_cka']:.3f}")
            plt.colorbar(im, ax=ax, shrink=0.8)
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm1_cka.png", dpi=300, bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm1 saved")

    # ── CM-2: Score correlation matrix ────────────────────────────────────
    if len(keys) >= 2 and all_corr:
        n = len(keys)
        # Symmetric model×model matrix; diagonal is self-correlation = 1.
        mat = np.eye(n)
        for pair_key, data in all_corr.items():
            if data is None:
                continue
            parts = pair_key.split("_vs_")
            if len(parts) == 2:
                i = keys.index(parts[0]) if parts[0] in keys else -1
                j = keys.index(parts[1]) if parts[1] in keys else -1
                if i >= 0 and j >= 0:
                    mat[i, j] = mat[j, i] = data["correlation"]

        fig, ax = plt.subplots(figsize=(8, 7))
        im = ax.imshow(mat, cmap="RdBu_r", vmin=-1, vmax=1)
        ax.set_xticks(range(n))
        ax.set_yticks(range(n))
        ax.set_xticklabels(keys, fontsize=12, rotation=20)
        ax.set_yticklabels(keys, fontsize=12)
        # Annotate each cell; white text on strongly coloured cells.
        for i in range(n):
            for j in range(n):
                c = "white" if abs(mat[i, j]) > 0.5 else "black"
                ax.text(j, i, f"{mat[i,j]:.3f}", ha="center", va="center",
                        fontsize=13, fontweight="bold", color=c)
        ax.set_title("[CM-2] Drift Score Correlation Matrix", fontsize=14)
        plt.colorbar(im, ax=ax, shrink=0.8)
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm2_corr.png", dpi=300, bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm2 saved")

    # ── CM-3: Differential facts ──────────────────────────────────────────
    diff_pairs = [(k, v) for k, v in all_diff.items() if v]
    if diff_pairs:
        # At most 4 panels to keep the figure readable.
        n_pairs = min(len(diff_pairs), 4)
        fig, axes = plt.subplots(1, n_pairs, figsize=(7 * n_pairs, 6))
        if n_pairs == 1:
            axes = [axes]
        fig.suptitle("[CM-3] Differential Facts", fontsize=16, fontweight="bold")
        for ax, (pair_key, data) in zip(axes, diff_pairs[:n_pairs]):
            sa = np.array(data["scores_a"])
            sb = np.array(data["scores_b"])
            la = np.array(data["labels_a"])
            lb = np.array(data["labels_b"])
            # Color by which model says drifted
            a_drifted = la.astype(bool) & ~lb.astype(bool)
            b_drifted = ~la.astype(bool) & lb.astype(bool)
            ax.scatter(sa[a_drifted], sb[a_drifted], c=P["drift"], alpha=0.5,
                       s=30, label="A=drifted, B=stable")
            ax.scatter(sa[b_drifted], sb[b_drifted], c=P["unc"], alpha=0.5,
                       s=30, label="A=stable, B=drifted")
            # Diagonal plus 0.5 decision lines as visual guides.
            ax.plot([0, 1], [0, 1], "k--", alpha=0.3)
            ax.axhline(0.5, color="gray", ls=":", alpha=0.3)
            ax.axvline(0.5, color="gray", ls=":", alpha=0.3)
            parts = pair_key.split("_vs_")
            ax.set(xlabel=f"{parts[0]} score", ylabel=f"{parts[1]} score",
                   title=f"{pair_key}\nr={data['score_correlation']:.3f}")
            ax.legend(fontsize=8)
            ax.grid(alpha=0.2)
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm3_diff.png", dpi=300, bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm3 saved")

    # ── CM-4: Layer correspondence ────────────────────────────────────────
    if layer_data and "per_model" in layer_data:
        pm = layer_data["per_model"]
        models = sorted(pm.keys())
        fig, axes = plt.subplots(1, 2, figsize=(14, 6))
        fig.suptitle("[CM-4] Layer Correspondence", fontsize=14,
                     fontweight="bold")

        # Absolute layers: stacked bar (best layer + remaining depth).
        x = np.arange(len(models))
        bls = [pm[m]["best_layer"] for m in models]
        nls = [pm[m]["n_layers"] for m in models]
        ax = axes[0]
        ax.bar(x, bls, color=P["drift"], edgecolor="black", lw=0.5,
               label="Best layer")
        ax.bar(x, [n - b for b, n in zip(bls, nls)], bottom=bls,
               color="#ecf0f1", edgecolor="black", lw=0.5, label="Remaining")
        ax.set_xticks(x)
        ax.set_xticklabels(models, fontsize=11)
        ax.set(ylabel="Layer", title="Best Drift Layer (absolute)")
        ax.legend()
        ax.grid(alpha=0.3, axis="y")

        # Fraction of depth, with mean ± std band across models.
        ax = axes[1]
        fracs = [pm[m]["fraction"] for m in models]
        bars = ax.bar(x, fracs, color=P["neu"], edgecolor="black", lw=0.5)
        ax.axhline(layer_data["mean_fraction"], color="red", ls="--", lw=2,
                   label=f"Mean: {layer_data['mean_fraction']:.1%}")
        ax.fill_between(
            [-0.5, len(models) - 0.5],
            layer_data["mean_fraction"] - layer_data["std_fraction"],
            layer_data["mean_fraction"] + layer_data["std_fraction"],
            alpha=0.2, color="red")
        ax.set_xticks(x)
        ax.set_xticklabels(models, fontsize=11)
        ax.set(ylabel="Fraction of depth", title="Best Layer as % of Depth",
               ylim=(0, 1))
        ax.legend()
        ax.grid(alpha=0.3, axis="y")
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm4_layers.png",
                    dpi=300, bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm4 saved")

    # ── CM-6: Summary ─────────────────────────────────────────────────────
    if univ_data:
        fig, ax = plt.subplots(figsize=(10, 6))
        comp = univ_data["components"]
        names = list(comp.keys())
        vals = list(comp.values())
        x = np.arange(len(names))
        # One palette colour per component bar (truncated to the count).
        colors = [P["drift"], P["unc"], P["corr"], P["neu"]][:len(names)]
        ax.bar(x, vals, color=colors, edgecolor="black", lw=0.5, alpha=0.8)
        ax.axhline(univ_data["universality_score"], color="red", ls="--",
                   lw=2.5,
                   label=f"Geo mean: {univ_data['universality_score']:.3f} "
                         f"[{univ_data['ci_95'][0]:.3f}, "
                         f"{univ_data['ci_95'][1]:.3f}]")
        ax.set_xticks(x)
        ax.set_xticklabels([n.replace("_", "\n") for n in names], fontsize=10)
        ax.set(ylabel="Score", title="[CM-6] Universality Score Components",
               ylim=(0, 1.1))
        ax.legend(fontsize=11)
        ax.grid(alpha=0.3, axis="y")
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm6_summary.png",
                    dpi=300, bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm6 saved")

    logger.info(f"All cross-model figures -> {fig_dir}")
712
+
713
+
714
+ # ─────────────────────────────────────────────────────────────────────────────
715
+ # MAIN
716
+ # ─────────────────────────────────────────────────────────────────────────────
717
+
718
def main():
    """CLI entry point: load caches/bundles, run CM-1..CM-6, save results."""
    # Imported up front: the original placed this import *after* the
    # results dict that calls datetime.now(), raising NameError at runtime.
    from datetime import datetime

    p = argparse.ArgumentParser(
        description="Cross-model drift analysis",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument("--models", nargs="+", default=None,
                   help="Model keys to compare")
    p.add_argument("--all", action="store_true",
                   help="Use all models with available caches")
    p.add_argument("--config", default="models.yaml")
    p.add_argument("--output_dir", default=None)
    p.add_argument("--device", default="cuda:0")
    p.add_argument("--quick", action="store_true",
                   help="Skip full-layer CKA, just best-layer")
    args = p.parse_args()

    cfg = load_config(args.config)
    defaults = cfg.get("defaults", {})
    output_dir = args.output_dir or defaults.get("output_dir",
                                                 "data/experiments/v4")

    # Determine which models to use
    if args.all:
        model_keys = list(cfg["models"].keys())
    elif args.models:
        model_keys = args.models
    else:
        logger.error("Specify --models or --all")
        return

    # Load caches and bundles; keep only models that have both.
    all_results = {}
    all_bundles = {}
    all_final = {}
    for key in model_keys:
        res = load_cache(output_dir, key)
        bundle = load_probe_bundle(output_dir, key)
        final = load_final_results(output_dir, key)
        if res and bundle:
            all_results[key] = res
            all_bundles[key] = bundle
            if final:
                all_final[key] = final

    keys = sorted(all_results.keys())
    logger.info(f"\nModels available: {keys}")

    if len(keys) < 2:
        logger.error("Need at least 2 models with caches + bundles")
        return

    cross_dir = Path(output_dir) / "cross_model"
    cross_dir.mkdir(parents=True, exist_ok=True)

    # Pairwise experiments (CM-1/2/3/5) over each unordered model pair.
    all_cka = {}
    all_corr = {}
    all_diff = {}
    all_neuron = {}

    for i, ka in enumerate(keys):
        for j, kb in enumerate(keys):
            if i >= j:
                continue  # each unordered pair exactly once
            pair = f"{ka}_vs_{kb}"
            logger.info(f"\n{'─'*50}")
            logger.info(f" {pair}")
            logger.info(f"{'─'*50}")

            # [CM-1] CKA
            all_cka[pair] = cka_analysis(
                all_results[ka], all_results[kb], ka, kb, quick=args.quick)

            # [CM-2] Score correlation
            all_corr[pair] = score_correlation(
                all_results[ka], all_results[kb], ka, kb,
                all_bundles[ka], all_bundles[kb], args.device)

            # [CM-3] Differential facts
            all_diff[pair] = differential_facts(
                all_results[ka], all_results[kb], ka, kb,
                all_bundles[ka], all_bundles[kb], args.device)

            # [CM-5] Neuron overlap
            all_neuron[pair] = neuron_overlap(
                all_bundles[ka], all_bundles[kb], ka, kb)

    # [CM-4] Layer correspondence
    layer_data = layer_correspondence(all_bundles, all_final)

    # [CM-6] Universality score
    univ_data = universality_score(all_cka, all_corr, all_diff, layer_data)

    # Save results (default=str covers any non-JSON-native values).
    results = {
        "models": keys,
        "cka": dict(all_cka),
        "score_correlation": dict(all_corr),
        "differential_facts": dict(all_diff),
        "neuron_overlap": {k: v for k, v in all_neuron.items() if v},
        "layer_correspondence": layer_data,
        "universality": univ_data,
        "timestamp": datetime.now().isoformat(),
    }

    out_path = cross_dir / "cross_model_results.json"
    with open(out_path, "w") as f:
        json.dump(results, f, indent=2, default=str)
    logger.info(f"\nResults saved: {out_path}")

    # Figures
    save_cross_figures(str(cross_dir), keys, all_cka, all_corr, all_diff,
                       layer_data, all_neuron, univ_data)

    # Print summary
    print(f"\n{'='*70}")
    print(f" CROSS-MODEL SUMMARY")
    print(f"{'='*70}")
    for pair, data in all_corr.items():
        if data:
            print(f" {pair}: score_corr={data['correlation']:.4f}")
    for pair, data in all_diff.items():
        if data:
            print(f" {pair}: diff_AUROC_a={data['auroc_a']:.4f} "
                  f"diff_AUROC_b={data['auroc_b']:.4f} "
                  f"n_diff={data['n_differential']}")
    if layer_data:
        print(f"\n Layer correspondence: "
              f"{layer_data['mean_fraction']:.1%} +/- "
              f"{layer_data['std_fraction']:.1%}")
    if univ_data:
        print(f"\n UNIVERSALITY SCORE: "
              f"{univ_data['universality_score']:.4f} "
              f"[{univ_data['ci_95'][0]:.4f}, {univ_data['ci_95'][1]:.4f}]")
    print(f"{'='*70}")
854
+
855
# Script entry point: run the cross-model analysis CLI.
if __name__ == "__main__":
    main()
data/_paraphrase_checkpoint.json ADDED
The diff for this file is too large to render. See raw diff
 
data/experiments/attention_analysis/attention_raw.json ADDED
The diff for this file is too large to render. See raw diff
 
data/experiments/attention_analysis/attention_summary.json ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "DRIFTED": {
3
+ "count": 16,
4
+ "avg_ltae": 1.2609138488769531,
5
+ "avg_taf_mean": NaN,
6
+ "avg_taf_std": NaN,
7
+ "avg_total_year_attn": NaN,
8
+ "layer_avg_ltae": [
9
+ 1.47076416015625,
10
+ 1.45465087890625,
11
+ 1.437744140625,
12
+ 1.49578857421875,
13
+ 1.27398681640625,
14
+ 1.43035888671875,
15
+ 1.554931640625,
16
+ 1.17523193359375,
17
+ 1.27337646484375,
18
+ 0.9449462890625,
19
+ 1.45892333984375,
20
+ 1.42083740234375,
21
+ 1.42706298828125,
22
+ 1.34649658203125,
23
+ 1.35137939453125,
24
+ 1.24322509765625,
25
+ 1.160400390625,
26
+ 0.8953857421875,
27
+ 1.22137451171875,
28
+ 0.4127349853515625,
29
+ 0.9071044921875,
30
+ 1.000396728515625,
31
+ 1.181884765625,
32
+ 1.33380126953125,
33
+ 1.43408203125,
34
+ 1.26983642578125,
35
+ 1.37713623046875,
36
+ 1.35174560546875
37
+ ],
38
+ "layer_avg_taf_mean": [
39
+ 0.049678802490234375,
40
+ 0.05012702941894531,
41
+ 0.05734062194824219,
42
+ 0.057758331298828125,
43
+ 0.011784553527832031,
44
+ 0.01109457015991211,
45
+ 0.010257244110107422,
46
+ 0.02216625213623047,
47
+ 0.014124393463134766,
48
+ 0.020898818969726562,
49
+ 0.017520904541015625,
50
+ 0.03136920928955078,
51
+ 0.03072357177734375,
52
+ 0.041080474853515625,
53
+ 0.04144859313964844,
54
+ 0.036899566650390625,
55
+ 0.0413665771484375,
56
+ 0.05501365661621094,
57
+ 0.04434967041015625,
58
+ 0.07583045959472656,
59
+ 0.04676342010498047,
60
+ 0.04759979248046875,
61
+ 0.18784332275390625,
62
+ 0.02937793731689453,
63
+ 0.008492469787597656,
64
+ 0.015366077423095703,
65
+ 0.016284942626953125,
66
+ NaN
67
+ ]
68
+ },
69
+ "known_drift_not_drifted": {
70
+ "count": 18,
71
+ "avg_ltae": 1.267578125,
72
+ "avg_taf_mean": NaN,
73
+ "avg_taf_std": NaN,
74
+ "avg_total_year_attn": NaN,
75
+ "layer_avg_ltae": [
76
+ 1.4648980034722223,
77
+ 1.4579535590277777,
78
+ 1.4423285590277777,
79
+ 1.4934353298611112,
80
+ 1.2616102430555556,
81
+ 1.4229058159722223,
82
+ 1.5550130208333333,
83
+ 1.1838107638888888,
84
+ 1.2801106770833333,
85
+ 0.9503038194444444,
86
+ 1.4531792534722223,
87
+ 1.4236653645833333,
88
+ 1.4342990451388888,
89
+ 1.3516710069444444,
90
+ 1.3551974826388888,
91
+ 1.2670355902777777,
92
+ 1.2147352430555556,
93
+ 0.9173719618055556,
94
+ 1.2401258680555556,
95
+ 0.4350043402777778,
96
+ 0.9250759548611112,
97
+ 1.0575086805555556,
98
+ 1.2380099826388888,
99
+ 1.29541015625,
100
+ 1.4357096354166667,
101
+ 1.2632378472222223,
102
+ 1.3846028645833333,
103
+ 1.2879774305555556
104
+ ],
105
+ "layer_avg_taf_mean": [
106
+ 0.05064731174045139,
107
+ 0.050286187065972224,
108
+ 0.0579071044921875,
109
+ 0.058252970377604164,
110
+ 0.011647542317708334,
111
+ 0.011139763726128472,
112
+ 0.010056813557942709,
113
+ 0.021834479437934026,
114
+ 0.013749440511067709,
115
+ 0.020189921061197918,
116
+ 0.017441219753689237,
117
+ 0.031412760416666664,
118
+ 0.030748155381944444,
119
+ 0.04120042588975695,
120
+ 0.041086832682291664,
121
+ 0.035666571723090276,
122
+ 0.039328681098090276,
123
+ 0.05196804470486111,
124
+ 0.041771782769097224,
125
+ 0.07118055555555555,
126
+ 0.043381585015190974,
127
+ 0.04065110948350695,
128
+ 0.16392347547743055,
129
+ 0.03054046630859375,
130
+ 0.008420096503363715,
131
+ 0.014623006184895834,
132
+ 0.015851338704427082,
133
+ NaN
134
+ ]
135
+ },
136
+ "no_drift_not_drifted": {
137
+ "count": 14,
138
+ "avg_ltae": 1.2851749342315046,
139
+ "avg_taf_mean": 0.03568217219138632,
140
+ "avg_taf_std": 0.039409403898278066,
141
+ "avg_total_year_attn": 27.97503662109375,
142
+ "layer_avg_ltae": [
143
+ 1.4906529017857142,
144
+ 1.46337890625,
145
+ 1.4345703125,
146
+ 1.482421875,
147
+ 1.3511439732142858,
148
+ 1.4000418526785714,
149
+ 1.5374581473214286,
150
+ 1.2036830357142858,
151
+ 1.2655552455357142,
152
+ 0.9605538504464286,
153
+ 1.4325474330357142,
154
+ 1.4031110491071428,
155
+ 1.4244559151785714,
156
+ 1.3436802455357142,
157
+ 1.3675362723214286,
158
+ 1.3018275669642858,
159
+ 1.2701590401785714,
160
+ 0.9279436383928571,
161
+ 1.2454659598214286,
162
+ 0.47621372767857145,
163
+ 0.9602399553571429,
164
+ 1.1858956473214286,
165
+ 1.2562081473214286,
166
+ 1.3112444196428572,
167
+ 1.43115234375,
168
+ 1.2403738839285714,
169
+ 1.3879045758928572,
170
+ 1.4294782366071428
171
+ ],
172
+ "layer_avg_taf_mean": [
173
+ 0.04708644321986607,
174
+ 0.04849897112165179,
175
+ 0.05767168317522321,
176
+ 0.0569610595703125,
177
+ 0.011593409946986608,
178
+ 0.011277879987444197,
179
+ 0.010521480015345983,
180
+ 0.021795000348772322,
181
+ 0.014025551932198661,
182
+ 0.020132882254464284,
183
+ 0.01705987112862723,
184
+ 0.029680524553571428,
185
+ 0.029687064034598216,
186
+ 0.040538242885044644,
187
+ 0.04040309361049107,
188
+ 0.034088134765625,
189
+ 0.03676714215959821,
190
+ 0.04921613420758929,
191
+ 0.040592738560267856,
192
+ 0.06517464773995536,
193
+ 0.03857203892299107,
194
+ 0.03687831333705357,
195
+ 0.13168116978236608,
196
+ 0.03133174351283482,
197
+ 0.010894230433872767,
198
+ 0.020661490304129466,
199
+ 0.020259857177734375,
200
+ 0.026050022670200894
201
+ ]
202
+ },
203
+ "stable_not_drifted": {
204
+ "count": 2,
205
+ "avg_ltae": 1.2617100306919644,
206
+ "avg_taf_mean": 0.039080756051199775,
207
+ "avg_taf_std": 0.044298444475446425,
208
+ "avg_total_year_attn": 30.6380615234375,
209
+ "layer_avg_ltae": [
210
+ 1.51611328125,
211
+ 1.38330078125,
212
+ 1.443359375,
213
+ 1.4951171875,
214
+ 1.32470703125,
215
+ 1.3896484375,
216
+ 1.490234375,
217
+ 1.36767578125,
218
+ 1.298828125,
219
+ 0.94921875,
220
+ 1.423828125,
221
+ 1.41796875,
222
+ 1.36962890625,
223
+ 1.36572265625,
224
+ 1.3076171875,
225
+ 1.3017578125,
226
+ 1.275390625,
227
+ 0.839111328125,
228
+ 1.034423828125,
229
+ 0.396240234375,
230
+ 0.93896484375,
231
+ 1.031982421875,
232
+ 1.097900390625,
233
+ 1.32470703125,
234
+ 1.544921875,
235
+ 1.21484375,
236
+ 1.43115234375,
237
+ 1.353515625
238
+ ],
239
+ "layer_avg_taf_mean": [
240
+ 0.0544586181640625,
241
+ 0.0511016845703125,
242
+ 0.0582122802734375,
243
+ 0.0601806640625,
244
+ 0.0115966796875,
245
+ 0.012378692626953125,
246
+ 0.012664794921875,
247
+ 0.02342987060546875,
248
+ 0.015239715576171875,
249
+ 0.02202606201171875,
250
+ 0.0182647705078125,
251
+ 0.033050537109375,
252
+ 0.03174591064453125,
253
+ 0.0473480224609375,
254
+ 0.035797119140625,
255
+ 0.03704833984375,
256
+ 0.03562164306640625,
257
+ 0.0518341064453125,
258
+ 0.037261962890625,
259
+ 0.060516357421875,
260
+ 0.041290283203125,
261
+ 0.037200927734375,
262
+ 0.0977630615234375,
263
+ 0.0742340087890625,
264
+ 0.0103912353515625,
265
+ 0.0435638427734375,
266
+ 0.02713775634765625,
267
+ 0.0529022216796875
268
+ ]
269
+ }
270
+ }
data/experiments/disentanglement/cached_states.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eeca40a26e386337cc926ee3314f4e1a7c51a6a5e669f4153107fedb62df0e36
3
+ size 572278675
data/experiments/disentanglement/disentanglement_results.json ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": "Qwen/Qwen2.5-7B-Instruct",
3
+ "n_samples": 2414,
4
+ "n_drifted": 734,
5
+ "n_non_drifted": 1680,
6
+ "confidence_threshold": 0.5,
7
+ "best_layer": 25,
8
+ "layer_results": {
9
+ "20": {
10
+ "drift_auroc": 0.9401817608974443,
11
+ "drift_C": 1.0,
12
+ "drift_active_neurons": 1144,
13
+ "uncertainty_auroc": 0.9850069912505234,
14
+ "uncertainty_C": 1.0,
15
+ "uncertainty_active_neurons": 605,
16
+ "cosine_similarity": -0.008123186416924,
17
+ "neuron_overlap_ratio": 0.15981432360742706,
18
+ "cell_analysis": {
19
+ "A: confident+stable": {
20
+ "n": 399,
21
+ "mean_drift_score": 0.0940876454114914,
22
+ "std_drift_score": 0.15016916394233704
23
+ },
24
+ "B: confident+drifted": {
25
+ "n": 148,
26
+ "mean_drift_score": 0.7503010034561157,
27
+ "std_drift_score": 0.2528006136417389
28
+ },
29
+ "C: uncertain+stable": {
30
+ "n": 1281,
31
+ "mean_drift_score": 0.09652922302484512,
32
+ "std_drift_score": 0.15843220055103302
33
+ },
34
+ "D: uncertain+drifted": {
35
+ "n": 586,
36
+ "mean_drift_score": 0.7983659505844116,
37
+ "std_drift_score": 0.24514280259609222
38
+ }
39
+ }
40
+ },
41
+ "21": {
42
+ "drift_auroc": 0.9301453621239755,
43
+ "drift_C": 1.0,
44
+ "drift_active_neurons": 1177,
45
+ "uncertainty_auroc": 0.9849118890796618,
46
+ "uncertainty_C": 1.0,
47
+ "uncertainty_active_neurons": 581,
48
+ "cosine_similarity": 0.017387816682457924,
49
+ "neuron_overlap_ratio": 0.1497710922171354,
50
+ "cell_analysis": {
51
+ "A: confident+stable": {
52
+ "n": 399,
53
+ "mean_drift_score": 0.09789332747459412,
54
+ "std_drift_score": 0.15043117105960846
55
+ },
56
+ "B: confident+drifted": {
57
+ "n": 148,
58
+ "mean_drift_score": 0.7385778427124023,
59
+ "std_drift_score": 0.24958111345767975
60
+ },
61
+ "C: uncertain+stable": {
62
+ "n": 1281,
63
+ "mean_drift_score": 0.09721978008747101,
64
+ "std_drift_score": 0.15863172709941864
65
+ },
66
+ "D: uncertain+drifted": {
67
+ "n": 586,
68
+ "mean_drift_score": 0.7971871495246887,
69
+ "std_drift_score": 0.24469861388206482
70
+ }
71
+ }
72
+ },
73
+ "22": {
74
+ "drift_auroc": 0.9331928302958495,
75
+ "drift_C": 1.0,
76
+ "drift_active_neurons": 1448,
77
+ "uncertainty_auroc": 0.9856988181780402,
78
+ "uncertainty_C": 1.0,
79
+ "uncertainty_active_neurons": 568,
80
+ "cosine_similarity": 0.0028538473416119814,
81
+ "neuron_overlap_ratio": 0.15265866209262435,
82
+ "cell_analysis": {
83
+ "A: confident+stable": {
84
+ "n": 399,
85
+ "mean_drift_score": 0.10201846808195114,
86
+ "std_drift_score": 0.15164713561534882
87
+ },
88
+ "B: confident+drifted": {
89
+ "n": 148,
90
+ "mean_drift_score": 0.7290875315666199,
91
+ "std_drift_score": 0.25161898136138916
92
+ },
93
+ "C: uncertain+stable": {
94
+ "n": 1281,
95
+ "mean_drift_score": 0.09975209087133408,
96
+ "std_drift_score": 0.16027547419071198
97
+ },
98
+ "D: uncertain+drifted": {
99
+ "n": 586,
100
+ "mean_drift_score": 0.79399174451828,
101
+ "std_drift_score": 0.24350778758525848
102
+ }
103
+ }
104
+ },
105
+ "23": {
106
+ "drift_auroc": 0.9365118470741198,
107
+ "drift_C": 1.0,
108
+ "drift_active_neurons": 1114,
109
+ "uncertainty_auroc": 0.9879286158449491,
110
+ "uncertainty_C": 1.0,
111
+ "uncertainty_active_neurons": 538,
112
+ "cosine_similarity": -0.012105713598430157,
113
+ "neuron_overlap_ratio": 0.13228238519533927,
114
+ "cell_analysis": {
115
+ "A: confident+stable": {
116
+ "n": 399,
117
+ "mean_drift_score": 0.09305085241794586,
118
+ "std_drift_score": 0.1508861929178238
119
+ },
120
+ "B: confident+drifted": {
121
+ "n": 148,
122
+ "mean_drift_score": 0.7569237947463989,
123
+ "std_drift_score": 0.2544007897377014
124
+ },
125
+ "C: uncertain+stable": {
126
+ "n": 1281,
127
+ "mean_drift_score": 0.09117205440998077,
128
+ "std_drift_score": 0.16001686453819275
129
+ },
130
+ "D: uncertain+drifted": {
131
+ "n": 586,
132
+ "mean_drift_score": 0.8082005977630615,
133
+ "std_drift_score": 0.2454250156879425
134
+ }
135
+ }
136
+ },
137
+ "24": {
138
+ "drift_auroc": 0.9431449716220474,
139
+ "drift_C": 1.0,
140
+ "drift_active_neurons": 1016,
141
+ "uncertainty_auroc": 0.9885632111980065,
142
+ "uncertainty_C": 1.0,
143
+ "uncertainty_active_neurons": 534,
144
+ "cosine_similarity": -0.0015611619455739856,
145
+ "neuron_overlap_ratio": 0.11591072714182865,
146
+ "cell_analysis": {
147
+ "A: confident+stable": {
148
+ "n": 399,
149
+ "mean_drift_score": 0.09185729175806046,
150
+ "std_drift_score": 0.14805559813976288
151
+ },
152
+ "B: confident+drifted": {
153
+ "n": 148,
154
+ "mean_drift_score": 0.7582934498786926,
155
+ "std_drift_score": 0.25423485040664673
156
+ },
157
+ "C: uncertain+stable": {
158
+ "n": 1281,
159
+ "mean_drift_score": 0.09093555808067322,
160
+ "std_drift_score": 0.16004474461078644
161
+ },
162
+ "D: uncertain+drifted": {
163
+ "n": 586,
164
+ "mean_drift_score": 0.8083383440971375,
165
+ "std_drift_score": 0.2449658066034317
166
+ }
167
+ }
168
+ },
169
+ "25": {
170
+ "drift_auroc": 0.9475673504887931,
171
+ "drift_C": 1.0,
172
+ "drift_active_neurons": 952,
173
+ "uncertainty_auroc": 0.9845519582427782,
174
+ "uncertainty_C": 1.0,
175
+ "uncertainty_active_neurons": 606,
176
+ "cosine_similarity": -0.012070227414369583,
177
+ "neuron_overlap_ratio": 0.14896755162241887,
178
+ "cell_analysis": {
179
+ "A: confident+stable": {
180
+ "n": 399,
181
+ "mean_drift_score": 0.09013982117176056,
182
+ "std_drift_score": 0.1471075862646103
183
+ },
184
+ "B: confident+drifted": {
185
+ "n": 148,
186
+ "mean_drift_score": 0.7635744214057922,
187
+ "std_drift_score": 0.2572714686393738
188
+ },
189
+ "C: uncertain+stable": {
190
+ "n": 1281,
191
+ "mean_drift_score": 0.0893697664141655,
192
+ "std_drift_score": 0.1600065380334854
193
+ },
194
+ "D: uncertain+drifted": {
195
+ "n": 586,
196
+ "mean_drift_score": 0.8106054663658142,
197
+ "std_drift_score": 0.24479782581329346
198
+ }
199
+ }
200
+ },
201
+ "26": {
202
+ "drift_auroc": 0.9473336040221698,
203
+ "drift_C": 1.0,
204
+ "drift_active_neurons": 1056,
205
+ "uncertainty_auroc": 0.9875504558093322,
206
+ "uncertainty_C": 1.0,
207
+ "uncertainty_active_neurons": 598,
208
+ "cosine_similarity": -0.02078830823302269,
209
+ "neuron_overlap_ratio": 0.13676975945017184,
210
+ "cell_analysis": {
211
+ "A: confident+stable": {
212
+ "n": 399,
213
+ "mean_drift_score": 0.08617039024829865,
214
+ "std_drift_score": 0.14635789394378662
215
+ },
216
+ "B: confident+drifted": {
217
+ "n": 148,
218
+ "mean_drift_score": 0.7733995318412781,
219
+ "std_drift_score": 0.257551908493042
220
+ },
221
+ "C: uncertain+stable": {
222
+ "n": 1281,
223
+ "mean_drift_score": 0.08761545270681381,
224
+ "std_drift_score": 0.16016000509262085
225
+ },
226
+ "D: uncertain+drifted": {
227
+ "n": 586,
228
+ "mean_drift_score": 0.815304696559906,
229
+ "std_drift_score": 0.24687542021274567
230
+ }
231
+ }
232
+ },
233
+ "27": {
234
+ "drift_auroc": 0.9470822738749772,
235
+ "drift_C": 1.0,
236
+ "drift_active_neurons": 750,
237
+ "uncertainty_auroc": 0.9909450020663509,
238
+ "uncertainty_C": 1.0,
239
+ "uncertainty_active_neurons": 375,
240
+ "cosine_similarity": -0.01884481869637966,
241
+ "neuron_overlap_ratio": 0.10294117647058823,
242
+ "cell_analysis": {
243
+ "A: confident+stable": {
244
+ "n": 399,
245
+ "mean_drift_score": 0.08059749752283096,
246
+ "std_drift_score": 0.14659664034843445
247
+ },
248
+ "B: confident+drifted": {
249
+ "n": 148,
250
+ "mean_drift_score": 0.7845885753631592,
251
+ "std_drift_score": 0.2633582055568695
252
+ },
253
+ "C: uncertain+stable": {
254
+ "n": 1281,
255
+ "mean_drift_score": 0.08413389325141907,
256
+ "std_drift_score": 0.1604897379875183
257
+ },
258
+ "D: uncertain+drifted": {
259
+ "n": 586,
260
+ "mean_drift_score": 0.8195926547050476,
261
+ "std_drift_score": 0.24837486445903778
262
+ }
263
+ }
264
+ }
265
+ },
266
+ "permutation_test": {
267
+ "true_auroc": 0.7590015570260802,
268
+ "null_mean": 0.5,
269
+ "null_std": 0.0,
270
+ "p_value": 0.0,
271
+ "n_permutations": 1000
272
+ },
273
+ "sparsity_curve": [
274
+ {
275
+ "C": 0.0001,
276
+ "n_active_neurons": 0,
277
+ "auroc": 0.5
278
+ },
279
+ {
280
+ "C": 0.0005,
281
+ "n_active_neurons": 0,
282
+ "auroc": 0.5
283
+ },
284
+ {
285
+ "C": 0.001,
286
+ "n_active_neurons": 0,
287
+ "auroc": 0.5
288
+ },
289
+ {
290
+ "C": 0.005,
291
+ "n_active_neurons": 7,
292
+ "auroc": 0.7590015570260802
293
+ },
294
+ {
295
+ "C": 0.01,
296
+ "n_active_neurons": 25,
297
+ "auroc": 0.8152385818087453
298
+ },
299
+ {
300
+ "C": 0.05,
301
+ "n_active_neurons": 125,
302
+ "auroc": 0.9305858310626702
303
+ },
304
+ {
305
+ "C": 0.1,
306
+ "n_active_neurons": 203,
307
+ "auroc": 0.9586617685221227
308
+ },
309
+ {
310
+ "C": 0.5,
311
+ "n_active_neurons": 588,
312
+ "auroc": 0.9779551057480212
313
+ },
314
+ {
315
+ "C": 1.0,
316
+ "n_active_neurons": 952,
317
+ "auroc": 0.9797083819903984
318
+ },
319
+ {
320
+ "C": 5.0,
321
+ "n_active_neurons": 2377,
322
+ "auroc": 0.9801904924094977
323
+ },
324
+ {
325
+ "C": 10.0,
326
+ "n_active_neurons": 2990,
327
+ "auroc": 0.9800303295705202
328
+ }
329
+ ],
330
+ "timestamp": "2026-03-06T04:05:50.245822"
331
+ }
data/experiments/disentanglement/fig1_disentanglement_overview.png ADDED

Git LFS Details

  • SHA256: 141d212a714a4ec02f0d4f16eb2f15b072100ac20cd2dfd21170704a810aa372
  • Pointer size: 131 Bytes
  • Size of remote file: 202 kB
data/experiments/disentanglement/fig2_pca_visualization.png ADDED

Git LFS Details

  • SHA256: d442671f64b39b366da72ae86032733db8ead943bd45e711eb9023dddae63baf
  • Pointer size: 132 Bytes
  • Size of remote file: 1.3 MB
data/experiments/disentanglement/fig3_drift_vs_uncertainty_directions.png ADDED

Git LFS Details

  • SHA256: aa53e635a161fe6568e8f519c835080329dd61159f9985dc2dde1cfa1733e196
  • Pointer size: 131 Bytes
  • Size of remote file: 698 kB
data/experiments/disentanglement/fig4_2x2_cell_analysis.png ADDED

Git LFS Details

  • SHA256: 4211474f5a9e0733465d78126c78a0a73c8b0ec2d578580a3ad6aa8b66768c80
  • Pointer size: 131 Bytes
  • Size of remote file: 121 kB
data/experiments/disentanglement_llama31/cached_states.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b0a8514c5b05e8cb22ba7edd22eebdeb0fdfecddfba21ca3bfd561ff77f0ab9
3
+ size 743568647
data/experiments/drift_neurons/drift_neuron_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "hidden_layer_0": {
3
+ "l1_lambda": 0.001,
4
+ "metrics": {
5
+ "auroc": 0.5,
6
+ "auroc_std": 0.0,
7
+ "auprc": 0.5,
8
+ "brier":
data/experiments/entropy_analysis/layer_signals_for_plotting.json ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "DRIFTED (post-cutoff, answer changed)": {
3
+ "layer_entropies": [
4
+ 5.52294921875,
5
+ 2.54351806640625,
6
+ 1.316253662109375,
7
+ 3.26361083984375,
8
+ 1.2166748046875,
9
+ 2.5682373046875,
10
+ 2.9808349609375,
11
+ 2.40643310546875,
12
+ 0.528564453125,
13
+ 1.335601806640625,
14
+ 2.7979736328125,
15
+ 6.949462890625,
16
+ 5.368408203125,
17
+ 5.93310546875,
18
+ 6.6044921875,
19
+ 6.924072265625,
20
+ 5.29052734375,
21
+ 5.44091796875,
22
+ 4.5015869140625,
23
+ 4.8775634765625,
24
+ 4.46875,
25
+ 3.368408203125,
26
+ 3.0117034912109375,
27
+ 5.2076416015625,
28
+ 5.4983367919921875,
29
+ 5.9327392578125,
30
+ 6.9853515625,
31
+ 1.7591285705566406
32
+ ],
33
+ "layer_top1_probs": [
34
+ 0.334991455078125,
35
+ 0.734527587890625,
36
+ 0.86883544921875,
37
+ 0.51593017578125,
38
+ 0.6884765625,
39
+ 0.709991455078125,
40
+ 0.600433349609375,
41
+ 0.747467041015625,
42
+ 0.951385498046875,
43
+ 0.86517333984375,
44
+ 0.671905517578125,
45
+ 0.0781402587890625,
46
+ 0.2167205810546875,
47
+ 0.200653076171875,
48
+ 0.11643218994140625,
49
+ 0.10364151000976562,
50
+ 0.3043975830078125,
51
+ 0.32175445556640625,
52
+ 0.3555450439453125,
53
+ 0.21065902709960938,
54
+ 0.2103729248046875,
55
+ 0.37808990478515625,
56
+ 0.483154296875,
57
+ 0.19554710388183594,
58
+ 0.23350238800048828,
59
+ 0.20551586151123047,
60
+ 0.11835479736328125,
61
+ 0.5825424194335938
62
+ ],
63
+ "layer_expected_probs": [
64
+ 1.300126314163208e-06,
65
+ 6.891787052154541e-07,
66
+ 2.905726432800293e-07,
67
+ 2.086162567138672e-07,
68
+ 7.078051567077637e-08,
69
+ 3.46451997756958e-07,
70
+ 1.7881393432617188e-07,
71
+ 1.564621925354004e-07,
72
+ 1.7508864402770996e-07,
73
+ 2.4959444999694824e-07,
74
+ 3.0174851417541504e-07,
75
+ 3.0919909477233887e-06,
76
+ 1.475214958190918e-06,
77
+ 2.1494925022125244e-06,
78
+ 2.25752592086792e-06,
79
+ 1.9781291484832764e-06,
80
+ 1.039355993270874e-06,
81
+ 5.848705768585205e-07,
82
+ 9.685754776000977e-08,
83
+ 3.725290298461914e-08,
84
+ 8.195638656616211e-08,
85
+ 1.601874828338623e-07,
86
+ 2.421438694000244e-07,
87
+ 3.028661012649536e-06,
88
+ 1.9855797290802e-06,
89
+ 1.2289732694625854e-05,
90
+ 0.0018126480281352997,
91
+ 0.00417274609208107
92
+ ],
93
+ "layer_old_probs": [
94
+ 2.562999725341797e-06,
95
+ 1.1511147022247314e-06,
96
+ 4.284083843231201e-07,
97
+ 2.4586915969848633e-07,
98
+ 5.587935447692871e-08,
99
+ 2.8312206268310547e-07,
100
+ 1.601874828338623e-07,
101
+ 1.8998980522155762e-07,
102
+ 2.1979212760925293e-07,
103
+ 2.980232238769531e-07,
104
+ 2.123415470123291e-07,
105
+ 3.1366944313049316e-06,
106
+ 1.475214958190918e-06,
107
+ 2.7865171432495117e-06,
108
+ 9.793788194656372e-06,
109
+ 4.120171070098877e-06,
110
+ 1.2516975402832031e-06,
111
+ 6.705522537231445e-07,
112
+ 1.043081283569336e-07,
113
+ 2.60770320892334e-08,
114
+ 5.587935447692871e-08,
115
+ 1.4156103134155273e-07,
116
+ 8.307397365570068e-07,
117
+ 2.074986696243286e-06,
118
+ 8.113682270050049e-06,
119
+ 0.0030834004282951355,
120
+ 0.00276801735162735,
121
+ 0.04702179878950119
122
+ ]
123
+ },
124
+ "KNOWN_DRIFT (pre-cutoff, model knows change)": {
125
+ "layer_entropies": [
126
+ 5.532769097222222,
127
+ 2.6904296875,
128
+ 1.4892578125,
129
+ 3.4954427083333335,
130
+ 1.192626953125,
131
+ 2.5450303819444446,
132
+ 2.9092881944444446,
133
+ 2.40625,
134
+ 0.5602891710069444,
135
+ 1.4476453993055556,
136
+ 3.0266927083333335,
137
+ 7.0546875,
138
+ 5.453342013888889,
139
+ 6.067057291666667,
140
+ 6.706380208333333,
141
+ 7.037760416666667,
142
+ 5.235243055555555,
143
+ 5.470920138888889,
144
+ 4.659830729166667,
145
+ 5.143446180555555,
146
+ 4.777886284722222,
147
+ 3.9322916666666665,
148
+ 2.8102213541666665,
149
+ 4.944769965277778,
150
+ 4.661973741319445,
151
+ 5.928276909722222,
152
+ 7.105251736111111,
153
+ 2.0204366048177085
154
+ ],
155
+ "layer_top1_probs": [
156
+ 0.3287082248263889,
157
+ 0.7144097222222222,
158
+ 0.8493381076388888,
159
+ 0.4696044921875,
160
+ 0.7038031684027778,
161
+ 0.7146538628472222,
162
+ 0.6084255642361112,
163
+ 0.7483452690972222,
164
+ 0.9480523003472222,
165
+ 0.8536241319444444,
166
+ 0.6454671223958334,
167
+ 0.0778316921657986,
168
+ 0.21478271484375,
169
+ 0.1902737087673611,
170
+ 0.11335923936631945,
171
+ 0.09361097547743055,
172
+ 0.3209296332465278,
173
+ 0.33294677734375,
174
+ 0.3272298177083333,
175
+ 0.1654052734375,
176
+ 0.1807284884982639,
177
+ 0.2668389214409722,
178
+ 0.5159708658854166,
179
+ 0.242095947265625,
180
+ 0.31541358100043404,
181
+ 0.19419691297743055,
182
+ 0.09735192192925347,
183
+ 0.4987657335069444
184
+ ],
185
+ "layer_expected_probs": [
186
+ 3.420644336276584e-06,
187
+ 2.9901663462320962e-06,
188
+ 1.3841523064507378e-06,
189
+ 1.0563267601860894e-06,
190
+ 1.7550256517198352e-07,
191
+ 7.980399661593967e-07,
192
+ 4.635916815863715e-07,
193
+ 3.7749608357747394e-07,
194
+ 3.3775965372721356e-07,
195
+ 7.020102606879341e-07,
196
+ 7.748603820800781e-07,
197
+ 4.516707526312934e-06,
198
+ 1.970264646742079e-06,
199
+ 3.741847144232856e-06,
200
+ 4.990233315361871e-06,
201
+ 3.4504466586642793e-06,
202
+ 1.4139546288384331e-06,
203
+ 1.394086413913303e-06,
204
+ 5.03328111436632e-07,
205
+ 1.1920928955078125e-07,
206
+ 9.602970547146268e-08,
207
+ 6.622738308376736e-08,
208
+ 4.967053731282552e-08,
209
+ 1.5232298109266494e-06,
210
+ 3.688865237765842e-06,
211
+ 1.6669432322184246e-05,
212
+ 0.001622299353281657,
213
+ 0.006558269262313843
214
+ ],
215
+ "layer_old_probs": [
216
+ 0.0,
217
+ 0.0,
218
+ 0.0,
219
+ 0.0,
220
+ 0.0,
221
+ 0.0,
222
+ 0.0,
223
+ 0.0,
224
+ 0.0,
225
+ 0.0,
226
+ 0.0,
227
+ 0.0,
228
+ 0.0,
229
+ 0.0,
230
+ 0.0,
231
+ 0.0,
232
+ 0.0,
233
+ 0.0,
234
+ 0.0,
235
+ 0.0,
236
+ 0.0,
237
+ 0.0,
238
+ 0.0,
239
+ 0.0,
240
+ 0.0,
241
+ 0.0,
242
+ 0.0,
243
+ 0.0
244
+ ]
245
+ },
246
+ "NO_DRIFT (post-cutoff, could change but didn't)": {
247
+ "layer_entropies": [
248
+ 5.503069196428571,
249
+ 2.8865792410714284,
250
+ 1.5439801897321428,
251
+ 3.3995535714285716,
252
+ 1.1728166852678572,
253
+ 2.6416015625,
254
+ 2.821986607142857,
255
+ 2.2252371651785716,
256
+ 0.4691859654017857,
257
+ 1.4714704241071428,
258
+ 2.9832589285714284,
259
+ 7.001674107142857,
260
+ 5.416015625,
261
+ 6.001116071428571,
262
+ 6.802455357142857,
263
+ 6.978794642857143,
264
+ 5.027483258928571,
265
+ 5.341657366071429,
266
+ 4.785993303571429,
267
+ 4.964006696428571,
268
+ 4.733677455357143,
269
+ 3.65771484375,
270
+ 3.9832589285714284,
271
+ 4.494698660714286,
272
+ 4.796247209821429,
273
+ 6.250558035714286,
274
+ 6.985630580357143,
275
+ 2.26318359375
276
+ ],
277
+ "layer_top1_probs": [
278
+ 0.32688685825892855,
279
+ 0.6917898995535714,
280
+ 0.8450055803571429,
281
+ 0.47745186941964285,
282
+ 0.7064034598214286,
283
+ 0.705322265625,
284
+ 0.619140625,
285
+ 0.7668805803571429,
286
+ 0.9564383370535714,
287
+ 0.8492954799107143,
288
+ 0.6484026227678571,
289
+ 0.08108084542410714,
290
+ 0.20847865513392858,
291
+ 0.19773646763392858,
292
+ 0.09962027413504464,
293
+ 0.10276576450892858,
294
+ 0.33909388950892855,
295
+ 0.35335867745535715,
296
+ 0.30741664341517855,
297
+ 0.20300728934151785,
298
+ 0.20433262416294642,
299
+ 0.30856759207589285,
300
+ 0.27522059849330355,
301
+ 0.2831638881138393,
302
+ 0.29340689522879465,
303
+ 0.1388059343610491,
304
+ 0.10486602783203125,
305
+ 0.49221365792410715
306
+ ],
307
+ "layer_expected_probs": [
308
+ 7.450580596923828e-07,
309
+ 4.981245313371931e-07,
310
+ 1.873288835797991e-07,
311
+ 4.470348358154297e-07,
312
+ 8.089201790945871e-08,
313
+ 4.896095820835658e-07,
314
+ 3.6188534327915735e-07,
315
+ 3.10795647757394e-07,
316
+ 1.1069434029715402e-07,
317
+ 2.2990362984793527e-07,
318
+ 3.405979701450893e-07,
319
+ 1.230410167149135e-06,
320
+ 6.130763462611608e-07,
321
+ 1.1793204716273716e-06,
322
+ 1.4262539999825613e-06,
323
+ 1.4347689492361887e-06,
324
+ 8.600098746163505e-07,
325
+ 4.470348358154297e-07,
326
+ 1.4901161193847656e-07,
327
+ 1.9158635820661273e-07,
328
+ 2.086162567138672e-07,
329
+ 1.532690865652902e-07,
330
+ 2.1287373134068081e-07,
331
+ 1.0601111820765903e-06,
332
+ 4.104205540248326e-06,
333
+ 4.9591064453125e-05,
334
+ 0.005290542330060687,
335
+ 0.0049742162227630615
336
+ ],
337
+ "layer_old_probs": [
338
+ 0.0,
339
+ 0.0,
340
+ 0.0,
341
+ 0.0,
342
+ 0.0,
343
+ 0.0,
344
+ 0.0,
345
+ 0.0,
346
+ 0.0,
347
+ 0.0,
348
+ 0.0,
349
+ 0.0,
350
+ 0.0,
351
+ 0.0,
352
+ 0.0,
353
+ 0.0,
354
+ 0.0,
355
+ 0.0,
356
+ 0.0,
357
+ 0.0,
358
+ 0.0,
359
+ 0.0,
360
+ 0.0,
361
+ 0.0,
362
+ 0.0,
363
+ 0.0,
364
+ 0.0,
365
+ 0.0
366
+ ]
367
+ },
368
+ "STABLE (post-cutoff, never changes)": {
369
+ "layer_entropies": [
370
+ 3.388671875,
371
+ 3.06640625,
372
+ 1.544921875,
373
+ 3.615234375,
374
+ 2.02978515625,
375
+ 3.2109375,
376
+ 2.7822265625,
377
+ 2.4091796875,
378
+ 0.6947021484375,
379
+ 0.908447265625,
380
+ 2.642578125,
381
+ 5.40625,
382
+ 3.8681640625,
383
+ 5.76953125,
384
+ 4.310546875,
385
+ 5.1875,
386
+ 5.240234375,
387
+ 6.1171875,
388
+ 5.146484375,
389
+ 3.80859375,
390
+ 2.8193359375,
391
+ 5.798828125,
392
+ 3.234375,
393
+ 0.8599853515625,
394
+ 1.47216796875,
395
+ 2.302490234375,
396
+ 0.5925039052963257,
397
+ 0.046706557273864746
398
+ ],
399
+ "layer_top1_probs": [
400
+ 0.649658203125,
401
+ 0.7109375,
402
+ 0.855712890625,
403
+ 0.517822265625,
404
+ 0.622802734375,
405
+ 0.5374755859375,
406
+ 0.53466796875,
407
+ 0.701171875,
408
+ 0.925048828125,
409
+ 0.89990234375,
410
+ 0.570068359375,
411
+ 0.23565673828125,
412
+ 0.3314208984375,
413
+ 0.119903564453125,
414
+ 0.4659423828125,
415
+ 0.3116455078125,
416
+ 0.23419189453125,
417
+ 0.1383056640625,
418
+ 0.2002410888671875,
419
+ 0.37921142578125,
420
+ 0.55419921875,
421
+ 0.19232177734375,
422
+ 0.330078125,
423
+ 0.711669921875,
424
+ 0.528564453125,
425
+ 0.5738525390625,
426
+ 0.7919921875,
427
+ 0.99169921875
428
+ ],
429
+ "layer_expected_probs": [
430
+ 2.092123031616211e-05,
431
+ 6.020069122314453e-06,
432
+ 1.8775463104248047e-06,
433
+ 4.172325134277344e-07,
434
+ 1.1920928955078125e-07,
435
+ 1.4901161193847656e-07,
436
+ 2.682209014892578e-07,
437
+ 0.0,
438
+ 2.9802322387695312e-08,
439
+ 2.9802322387695312e-08,
440
+ 5.960464477539063e-08,
441
+ 5.960464477539062e-07,
442
+ 2.980232238769531e-07,
443
+ 2.980232238769531e-07,
444
+ 7.748603820800781e-07,
445
+ 7.748603820800781e-07,
446
+ 4.172325134277344e-07,
447
+ 2.682209014892578e-07,
448
+ 5.960464477539063e-08,
449
+ 2.9802322387695312e-08,
450
+ 0.0,
451
+ 6.556510925292969e-07,
452
+ 1.1324882507324219e-06,
453
+ 5.662441253662109e-07,
454
+ 2.0265579223632812e-06,
455
+ 0.01308441162109375,
456
+ 0.58233642578125,
457
+ 0.99169921875
458
+ ],
459
+ "layer_old_probs": [
460
+ 0.0,
461
+ 0.0,
462
+ 0.0,
463
+ 0.0,
464
+ 0.0,
465
+ 0.0,
466
+ 0.0,
467
+ 0.0,
468
+ 0.0,
469
+ 0.0,
470
+ 0.0,
471
+ 0.0,
472
+ 0.0,
473
+ 0.0,
474
+ 0.0,
475
+ 0.0,
476
+ 0.0,
477
+ 0.0,
478
+ 0.0,
479
+ 0.0,
480
+ 0.0,
481
+ 0.0,
482
+ 0.0,
483
+ 0.0,
484
+ 0.0,
485
+ 0.0,
486
+ 0.0,
487
+ 0.0
488
+ ]
489
+ }
490
+ }
data/experiments/entropy_analysis/raw_results.json ADDED
The diff for this file is too large to render. See raw diff
 
data/experiments/entropy_analysis/signal_summary.json ADDED
@@ -0,0 +1,522 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "DRIFTED (post-cutoff, answer changed)": {
3
+ "count": 16,
4
+ "avg_entropy": 2.4283905029296875,
5
+ "std_entropy": 1.4596738111148464,
6
+ "avg_top1_prob": 0.47666168212890625,
7
+ "std_top1_prob": 0.277152691495795,
8
+ "avg_expected_answer_prob": 0.01105436310172081,
9
+ "avg_expected_answer_rank": 1116.4375,
10
+ "avg_old_answer_prob": 0.14018350467085838,
11
+ "layer_avg_entropies": [
12
+ 5.52294921875,
13
+ 2.54351806640625,
14
+ 1.316253662109375,
15
+ 3.26361083984375,
16
+ 1.2166748046875,
17
+ 2.5682373046875,
18
+ 2.9808349609375,
19
+ 2.40643310546875,
20
+ 0.528564453125,
21
+ 1.335601806640625,
22
+ 2.7979736328125,
23
+ 6.949462890625,
24
+ 5.368408203125,
25
+ 5.93310546875,
26
+ 6.6044921875,
27
+ 6.924072265625,
28
+ 5.29052734375,
29
+ 5.44091796875,
30
+ 4.5015869140625,
31
+ 4.8775634765625,
32
+ 4.46875,
33
+ 3.368408203125,
34
+ 3.0117034912109375,
35
+ 5.2076416015625,
36
+ 5.4983367919921875,
37
+ 5.9327392578125,
38
+ 6.9853515625,
39
+ 1.7591285705566406
40
+ ],
41
+ "layer_avg_top1_probs": [
42
+ 0.334991455078125,
43
+ 0.734527587890625,
44
+ 0.86883544921875,
45
+ 0.51593017578125,
46
+ 0.6884765625,
47
+ 0.709991455078125,
48
+ 0.600433349609375,
49
+ 0.747467041015625,
50
+ 0.951385498046875,
51
+ 0.86517333984375,
52
+ 0.671905517578125,
53
+ 0.0781402587890625,
54
+ 0.2167205810546875,
55
+ 0.200653076171875,
56
+ 0.11643218994140625,
57
+ 0.10364151000976562,
58
+ 0.3043975830078125,
59
+ 0.32175445556640625,
60
+ 0.3555450439453125,
61
+ 0.21065902709960938,
62
+ 0.2103729248046875,
63
+ 0.37808990478515625,
64
+ 0.483154296875,
65
+ 0.19554710388183594,
66
+ 0.23350238800048828,
67
+ 0.20551586151123047,
68
+ 0.11835479736328125,
69
+ 0.5825424194335938
70
+ ],
71
+ "layer_avg_expected_probs": [
72
+ 1.300126314163208e-06,
73
+ 6.891787052154541e-07,
74
+ 2.905726432800293e-07,
75
+ 2.086162567138672e-07,
76
+ 7.078051567077637e-08,
77
+ 3.46451997756958e-07,
78
+ 1.7881393432617188e-07,
79
+ 1.564621925354004e-07,
80
+ 1.7508864402770996e-07,
81
+ 2.4959444999694824e-07,
82
+ 3.0174851417541504e-07,
83
+ 3.0919909477233887e-06,
84
+ 1.475214958190918e-06,
85
+ 2.1494925022125244e-06,
86
+ 2.25752592086792e-06,
87
+ 1.9781291484832764e-06,
88
+ 1.039355993270874e-06,
89
+ 5.848705768585205e-07,
90
+ 9.685754776000977e-08,
91
+ 3.725290298461914e-08,
92
+ 8.195638656616211e-08,
93
+ 1.601874828338623e-07,
94
+ 2.421438694000244e-07,
95
+ 3.028661012649536e-06,
96
+ 1.9855797290802e-06,
97
+ 1.2289732694625854e-05,
98
+ 0.0018126480281352997,
99
+ 0.00417274609208107
100
+ ],
101
+ "layer_avg_old_probs": [
102
+ 2.562999725341797e-06,
103
+ 1.1511147022247314e-06,
104
+ 4.284083843231201e-07,
105
+ 2.4586915969848633e-07,
106
+ 5.587935447692871e-08,
107
+ 2.8312206268310547e-07,
108
+ 1.601874828338623e-07,
109
+ 1.8998980522155762e-07,
110
+ 2.1979212760925293e-07,
111
+ 2.980232238769531e-07,
112
+ 2.123415470123291e-07,
113
+ 3.1366944313049316e-06,
114
+ 1.475214958190918e-06,
115
+ 2.7865171432495117e-06,
116
+ 9.793788194656372e-06,
117
+ 4.120171070098877e-06,
118
+ 1.2516975402832031e-06,
119
+ 6.705522537231445e-07,
120
+ 1.043081283569336e-07,
121
+ 2.60770320892334e-08,
122
+ 5.587935447692871e-08,
123
+ 1.4156103134155273e-07,
124
+ 8.307397365570068e-07,
125
+ 2.074986696243286e-06,
126
+ 8.113682270050049e-06,
127
+ 0.0030834004282951355,
128
+ 0.00276801735162735,
129
+ 0.04702179878950119
130
+ ]
131
+ },
132
+ "KNOWN_DRIFT (pre-cutoff, model knows change)": {
133
+ "count": 18,
134
+ "avg_entropy": 2.192626953125,
135
+ "std_entropy": 1.052990943659832,
136
+ "avg_top1_prob": 0.5486314561631944,
137
+ "std_top1_prob": 0.24056330318864452,
138
+ "avg_expected_answer_prob": 0.0022764967547522653,
139
+ "avg_expected_answer_rank": 594.9444444444445,
140
+ "avg_old_answer_prob": 0.0,
141
+ "layer_avg_entropies": [
142
+ 5.532769097222222,
143
+ 2.6904296875,
144
+ 1.4892578125,
145
+ 3.4954427083333335,
146
+ 1.192626953125,
147
+ 2.5450303819444446,
148
+ 2.9092881944444446,
149
+ 2.40625,
150
+ 0.5602891710069444,
151
+ 1.4476453993055556,
152
+ 3.0266927083333335,
153
+ 7.0546875,
154
+ 5.453342013888889,
155
+ 6.067057291666667,
156
+ 6.706380208333333,
157
+ 7.037760416666667,
158
+ 5.235243055555555,
159
+ 5.470920138888889,
160
+ 4.659830729166667,
161
+ 5.143446180555555,
162
+ 4.777886284722222,
163
+ 3.9322916666666665,
164
+ 2.8102213541666665,
165
+ 4.944769965277778,
166
+ 4.661973741319445,
167
+ 5.928276909722222,
168
+ 7.105251736111111,
169
+ 2.0204366048177085
170
+ ],
171
+ "layer_avg_top1_probs": [
172
+ 0.3287082248263889,
173
+ 0.7144097222222222,
174
+ 0.8493381076388888,
175
+ 0.4696044921875,
176
+ 0.7038031684027778,
177
+ 0.7146538628472222,
178
+ 0.6084255642361112,
179
+ 0.7483452690972222,
180
+ 0.9480523003472222,
181
+ 0.8536241319444444,
182
+ 0.6454671223958334,
183
+ 0.0778316921657986,
184
+ 0.21478271484375,
185
+ 0.1902737087673611,
186
+ 0.11335923936631945,
187
+ 0.09361097547743055,
188
+ 0.3209296332465278,
189
+ 0.33294677734375,
190
+ 0.3272298177083333,
191
+ 0.1654052734375,
192
+ 0.1807284884982639,
193
+ 0.2668389214409722,
194
+ 0.5159708658854166,
195
+ 0.242095947265625,
196
+ 0.31541358100043404,
197
+ 0.19419691297743055,
198
+ 0.09735192192925347,
199
+ 0.4987657335069444
200
+ ],
201
+ "layer_avg_expected_probs": [
202
+ 3.420644336276584e-06,
203
+ 2.9901663462320962e-06,
204
+ 1.3841523064507378e-06,
205
+ 1.0563267601860894e-06,
206
+ 1.7550256517198352e-07,
207
+ 7.980399661593967e-07,
208
+ 4.635916815863715e-07,
209
+ 3.7749608357747394e-07,
210
+ 3.3775965372721356e-07,
211
+ 7.020102606879341e-07,
212
+ 7.748603820800781e-07,
213
+ 4.516707526312934e-06,
214
+ 1.970264646742079e-06,
215
+ 3.741847144232856e-06,
216
+ 4.990233315361871e-06,
217
+ 3.4504466586642793e-06,
218
+ 1.4139546288384331e-06,
219
+ 1.394086413913303e-06,
220
+ 5.03328111436632e-07,
221
+ 1.1920928955078125e-07,
222
+ 9.602970547146268e-08,
223
+ 6.622738308376736e-08,
224
+ 4.967053731282552e-08,
225
+ 1.5232298109266494e-06,
226
+ 3.688865237765842e-06,
227
+ 1.6669432322184246e-05,
228
+ 0.001622299353281657,
229
+ 0.006558269262313843
230
+ ],
231
+ "layer_avg_old_probs": [
232
+ 0.0,
233
+ 0.0,
234
+ 0.0,
235
+ 0.0,
236
+ 0.0,
237
+ 0.0,
238
+ 0.0,
239
+ 0.0,
240
+ 0.0,
241
+ 0.0,
242
+ 0.0,
243
+ 0.0,
244
+ 0.0,
245
+ 0.0,
246
+ 0.0,
247
+ 0.0,
248
+ 0.0,
249
+ 0.0,
250
+ 0.0,
251
+ 0.0,
252
+ 0.0,
253
+ 0.0,
254
+ 0.0,
255
+ 0.0,
256
+ 0.0,
257
+ 0.0,
258
+ 0.0,
259
+ 0.0
260
+ ]
261
+ },
262
+ "NO_DRIFT (post-cutoff, could change but didn't)": {
263
+ "count": 14,
264
+ "avg_entropy": 3.041259765625,
265
+ "std_entropy": 1.2868754903974626,
266
+ "avg_top1_prob": 0.38180106026785715,
267
+ "std_top1_prob": 0.2012997163043124,
268
+ "avg_expected_answer_prob": 0.011885932513645716,
269
+ "avg_expected_answer_rank": 638.3571428571429,
270
+ "avg_old_answer_prob": 0.0,
271
+ "layer_avg_entropies": [
272
+ 5.503069196428571,
273
+ 2.8865792410714284,
274
+ 1.5439801897321428,
275
+ 3.3995535714285716,
276
+ 1.1728166852678572,
277
+ 2.6416015625,
278
+ 2.821986607142857,
279
+ 2.2252371651785716,
280
+ 0.4691859654017857,
281
+ 1.4714704241071428,
282
+ 2.9832589285714284,
283
+ 7.001674107142857,
284
+ 5.416015625,
285
+ 6.001116071428571,
286
+ 6.802455357142857,
287
+ 6.978794642857143,
288
+ 5.027483258928571,
289
+ 5.341657366071429,
290
+ 4.785993303571429,
291
+ 4.964006696428571,
292
+ 4.733677455357143,
293
+ 3.65771484375,
294
+ 3.9832589285714284,
295
+ 4.494698660714286,
296
+ 4.796247209821429,
297
+ 6.250558035714286,
298
+ 6.985630580357143,
299
+ 2.26318359375
300
+ ],
301
+ "layer_avg_top1_probs": [
302
+ 0.32688685825892855,
303
+ 0.6917898995535714,
304
+ 0.8450055803571429,
305
+ 0.47745186941964285,
306
+ 0.7064034598214286,
307
+ 0.705322265625,
308
+ 0.619140625,
309
+ 0.7668805803571429,
310
+ 0.9564383370535714,
311
+ 0.8492954799107143,
312
+ 0.6484026227678571,
313
+ 0.08108084542410714,
314
+ 0.20847865513392858,
315
+ 0.19773646763392858,
316
+ 0.09962027413504464,
317
+ 0.10276576450892858,
318
+ 0.33909388950892855,
319
+ 0.35335867745535715,
320
+ 0.30741664341517855,
321
+ 0.20300728934151785,
322
+ 0.20433262416294642,
323
+ 0.30856759207589285,
324
+ 0.27522059849330355,
325
+ 0.2831638881138393,
326
+ 0.29340689522879465,
327
+ 0.1388059343610491,
328
+ 0.10486602783203125,
329
+ 0.49221365792410715
330
+ ],
331
+ "layer_avg_expected_probs": [
332
+ 7.450580596923828e-07,
333
+ 4.981245313371931e-07,
334
+ 1.873288835797991e-07,
335
+ 4.470348358154297e-07,
336
+ 8.089201790945871e-08,
337
+ 4.896095820835658e-07,
338
+ 3.6188534327915735e-07,
339
+ 3.10795647757394e-07,
340
+ 1.1069434029715402e-07,
341
+ 2.2990362984793527e-07,
342
+ 3.405979701450893e-07,
343
+ 1.230410167149135e-06,
344
+ 6.130763462611608e-07,
345
+ 1.1793204716273716e-06,
346
+ 1.4262539999825613e-06,
347
+ 1.4347689492361887e-06,
348
+ 8.600098746163505e-07,
349
+ 4.470348358154297e-07,
350
+ 1.4901161193847656e-07,
351
+ 1.9158635820661273e-07,
352
+ 2.086162567138672e-07,
353
+ 1.532690865652902e-07,
354
+ 2.1287373134068081e-07,
355
+ 1.0601111820765903e-06,
356
+ 4.104205540248326e-06,
357
+ 4.9591064453125e-05,
358
+ 0.005290542330060687,
359
+ 0.0049742162227630615
360
+ ],
361
+ "layer_avg_old_probs": [
362
+ 0.0,
363
+ 0.0,
364
+ 0.0,
365
+ 0.0,
366
+ 0.0,
367
+ 0.0,
368
+ 0.0,
369
+ 0.0,
370
+ 0.0,
371
+ 0.0,
372
+ 0.0,
373
+ 0.0,
374
+ 0.0,
375
+ 0.0,
376
+ 0.0,
377
+ 0.0,
378
+ 0.0,
379
+ 0.0,
380
+ 0.0,
381
+ 0.0,
382
+ 0.0,
383
+ 0.0,
384
+ 0.0,
385
+ 0.0,
386
+ 0.0,
387
+ 0.0,
388
+ 0.0,
389
+ 0.0
390
+ ]
391
+ },
392
+ "STABLE (post-cutoff, never changes)": {
393
+ "count": 2,
394
+ "avg_entropy": 0.0290374755859375,
395
+ "std_entropy": 0.0290374755859375,
396
+ "avg_top1_prob": 0.994873046875,
397
+ "std_top1_prob": 0.005126953125,
398
+ "avg_expected_answer_prob": 0.994873046875,
399
+ "avg_expected_answer_rank": 0.0,
400
+ "avg_old_answer_prob": 0.0,
401
+ "layer_avg_entropies": [
402
+ 3.388671875,
403
+ 3.06640625,
404
+ 1.544921875,
405
+ 3.615234375,
406
+ 2.02978515625,
407
+ 3.2109375,
408
+ 2.7822265625,
409
+ 2.4091796875,
410
+ 0.6947021484375,
411
+ 0.908447265625,
412
+ 2.642578125,
413
+ 5.40625,
414
+ 3.8681640625,
415
+ 5.76953125,
416
+ 4.310546875,
417
+ 5.1875,
418
+ 5.240234375,
419
+ 6.1171875,
420
+ 5.146484375,
421
+ 3.80859375,
422
+ 2.8193359375,
423
+ 5.798828125,
424
+ 3.234375,
425
+ 0.8599853515625,
426
+ 1.47216796875,
427
+ 2.302490234375,
428
+ 0.5925039052963257,
429
+ 0.046706557273864746
430
+ ],
431
+ "layer_avg_top1_probs": [
432
+ 0.649658203125,
433
+ 0.7109375,
434
+ 0.855712890625,
435
+ 0.517822265625,
436
+ 0.622802734375,
437
+ 0.5374755859375,
438
+ 0.53466796875,
439
+ 0.701171875,
440
+ 0.925048828125,
441
+ 0.89990234375,
442
+ 0.570068359375,
443
+ 0.23565673828125,
444
+ 0.3314208984375,
445
+ 0.119903564453125,
446
+ 0.4659423828125,
447
+ 0.3116455078125,
448
+ 0.23419189453125,
449
+ 0.1383056640625,
450
+ 0.2002410888671875,
451
+ 0.37921142578125,
452
+ 0.55419921875,
453
+ 0.19232177734375,
454
+ 0.330078125,
455
+ 0.711669921875,
456
+ 0.528564453125,
457
+ 0.5738525390625,
458
+ 0.7919921875,
459
+ 0.99169921875
460
+ ],
461
+ "layer_avg_expected_probs": [
462
+ 2.092123031616211e-05,
463
+ 6.020069122314453e-06,
464
+ 1.8775463104248047e-06,
465
+ 4.172325134277344e-07,
466
+ 1.1920928955078125e-07,
467
+ 1.4901161193847656e-07,
468
+ 2.682209014892578e-07,
469
+ 0.0,
470
+ 2.9802322387695312e-08,
471
+ 2.9802322387695312e-08,
472
+ 5.960464477539063e-08,
473
+ 5.960464477539062e-07,
474
+ 2.980232238769531e-07,
475
+ 2.980232238769531e-07,
476
+ 7.748603820800781e-07,
477
+ 7.748603820800781e-07,
478
+ 4.172325134277344e-07,
479
+ 2.682209014892578e-07,
480
+ 5.960464477539063e-08,
481
+ 2.9802322387695312e-08,
482
+ 0.0,
483
+ 6.556510925292969e-07,
484
+ 1.1324882507324219e-06,
485
+ 5.662441253662109e-07,
486
+ 2.0265579223632812e-06,
487
+ 0.01308441162109375,
488
+ 0.58233642578125,
489
+ 0.99169921875
490
+ ],
491
+ "layer_avg_old_probs": [
492
+ 0.0,
493
+ 0.0,
494
+ 0.0,
495
+ 0.0,
496
+ 0.0,
497
+ 0.0,
498
+ 0.0,
499
+ 0.0,
500
+ 0.0,
501
+ 0.0,
502
+ 0.0,
503
+ 0.0,
504
+ 0.0,
505
+ 0.0,
506
+ 0.0,
507
+ 0.0,
508
+ 0.0,
509
+ 0.0,
510
+ 0.0,
511
+ 0.0,
512
+ 0.0,
513
+ 0.0,
514
+ 0.0,
515
+ 0.0,
516
+ 0.0,
517
+ 0.0,
518
+ 0.0,
519
+ 0.0
520
+ ]
521
+ }
522
+ }
data/experiments/tier1_gemma2_v2/all_layer_results.json ADDED
The diff for this file is too large to render. See raw diff
 
data/experiments/tier1_gemma2_v2/cached_states.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00d7efe60b6dadc21ab51a08e9ce403d40d6d468741c5f51dbaf726238dc9d5a
3
+ size 5351543162
data/experiments/tier1_gemma2_v2/figures/fig1_dashboard.png ADDED

Git LFS Details

  • SHA256: c520f2639a05572387f7f8a6779de3ab851a47068f3dcb38bbc2725b3f9e5de5
  • Pointer size: 131 Bytes
  • Size of remote file: 281 kB
data/experiments/tier1_gemma2_v2/figures/fig2_per_relation.png ADDED

Git LFS Details

  • SHA256: fd596481ec0b9a0e4d3ca6714157f84cdab82a0917a5267853754c600008ba78
  • Pointer size: 131 Bytes
  • Size of remote file: 160 kB
data/experiments/tier1_gemma2_v2/figures/fig3_2x2_cells.png ADDED

Git LFS Details

  • SHA256: d4214a73e4e88050c4ef0e4dd045d2a43139023d978dc9b89600dff053750760
  • Pointer size: 130 Bytes
  • Size of remote file: 92.4 kB
data/experiments/tier1_gemma2_v2/figures/fig4_pca_projections.png ADDED

Git LFS Details

  • SHA256: 3f5900ea6bed6e9e47ba52f5a09046b76102c05ff32732fcca16d769993029c9
  • Pointer size: 132 Bytes
  • Size of remote file: 3.64 MB
data/experiments/tier1_gemma2_v2/figures/fig5_cosine_matrix.png ADDED

Git LFS Details

  • SHA256: 02df151b4bcc65f229ba996f6bd875903ae56ef833e9e5feb9847b3c101b7a6c
  • Pointer size: 131 Bytes
  • Size of remote file: 105 kB
data/experiments/tier1_gemma2_v2/figures/fig6_sparsity_tradeoff.png ADDED

Git LFS Details

  • SHA256: 8c71530976792c79c128017053fa54092b28e06032827ac1f770d90f57b5f204
  • Pointer size: 131 Bytes
  • Size of remote file: 159 kB
data/experiments/tier1_gemma2_v2/figures/fig7_neuron_overlap.png ADDED

Git LFS Details

  • SHA256: 33c96616b16e84c548464998daa78bb542d0b021c214f3e9a7747bd7bd903e74
  • Pointer size: 131 Bytes
  • Size of remote file: 179 kB
data/experiments/tier1_gemma2_v2/figures/fig8_weight_distributions.png ADDED

Git LFS Details

  • SHA256: 9647301e5d50d317dc6e017f51c78d4610afde6a3d2a2243e974df8bc3fd596a
  • Pointer size: 131 Bytes
  • Size of remote file: 147 kB
data/experiments/tier1_gemma2_v2/final_results.json ADDED
The diff for this file is too large to render. See raw diff
 
data/experiments/tier1_gemma2_v2/per_layer/layer_00.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 0,
3
+ "drift_auroc": 0.893690467142311,
4
+ "drift_C": 1.0,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9468121942708154,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9911295549792435,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": 0.04864960163831711,
13
+ "cos_drift_correctness": 0.019523993134498596,
14
+ "cos_uncertainty_correctness": 0.02531621605157852,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.06004580855369568,
21
+ "std": 0.09024611115455627
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.4490325152873993,
26
+ "std": 0.19269588589668274
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.02900705859065056,
31
+ "std": 0.06720896065235138
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.3332471251487732,
36
+ "std": 0.22961613535881042
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.7026691136224604,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9078858727663971,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.8167778352619494,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.49824870630239754,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.7720848056537103,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.5192238429677651,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9392068688802766,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.739601495897723,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.8821834338392397,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 166.7998149394989
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_01.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 1,
3
+ "drift_auroc": 0.9011490479613139,
4
+ "drift_C": 1.0,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.950048076452063,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9964158507941018,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": 0.0267711840569973,
13
+ "cos_drift_correctness": -0.005616667214781046,
14
+ "cos_uncertainty_correctness": 0.005185432732105255,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.04730555787682533,
21
+ "std": 0.08764217793941498
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.5626447200775146,
26
+ "std": 0.21129858493804932
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.026469526812434196,
31
+ "std": 0.06594817340373993
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.387656569480896,
36
+ "std": 0.24772387742996216
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.6954280562799832,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9146280954227356,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.8389002062819181,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.4647895811206773,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.8345111896348646,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.5603877783780539,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.951401989627087,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.8182645131202335,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9043420448793534,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 169.61970496177673
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_02.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 2,
3
+ "drift_auroc": 0.896794238503098,
4
+ "drift_C": 1.0,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9524823664516303,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9976293574168594,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": 0.01810559071600437,
13
+ "cos_drift_correctness": -0.007012477610260248,
14
+ "cos_uncertainty_correctness": -0.0047056423500180244,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.0446111224591732,
21
+ "std": 0.08681754022836685
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.5915916562080383,
26
+ "std": 0.20707546174526215
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.02458667755126953,
31
+ "std": 0.06488249450922012
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.4164687991142273,
36
+ "std": 0.24728882312774658
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.6977719823561608,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9117476343121672,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.8143547310449092,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.5125660030581731,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.7555948174322733,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.5532444924665346,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9506697066023784,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.8079059184598046,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9099473701292581,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 173.2671389579773
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_03.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 3,
3
+ "drift_auroc": 0.9225361554547803,
4
+ "drift_C": 1.0,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9638786049961436,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9974697877989147,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": 0.010065648704767227,
13
+ "cos_drift_correctness": 0.023633282631635666,
14
+ "cos_uncertainty_correctness": 0.00039188837399706244,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.031129170209169388,
21
+ "std": 0.09004193544387817
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.7261084914207458,
26
+ "std": 0.2061491310596466
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.017781062051653862,
31
+ "std": 0.06107195094227791
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.5275082588195801,
36
+ "std": 0.2770618796348572
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.755339837084259,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9377751829455391,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.8435954054343507,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.5274670643798159,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.8498233215547703,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.6006663065009904,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9578111554132155,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.824638788878667,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9352960264668337,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 162.03071880340576
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_04.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 4,
3
+ "drift_auroc": 0.9396707206563885,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9691975303229995,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9975606877008757,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.020001864060759544,
13
+ "cos_drift_correctness": 0.007599391043186188,
14
+ "cos_uncertainty_correctness": -0.01379857212305069,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.04271675646305084,
21
+ "std": 0.09466906636953354
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6230219006538391,
26
+ "std": 0.21390636265277863
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.021475067362189293,
31
+ "std": 0.06276922672986984
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.4303804934024811,
36
+ "std": 0.28279992938041687
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.7960913100872533,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9670924954097121,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.8954248328976688,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.511530291731634,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9275618374558303,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.698046101206555,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9653269451749541,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.8409110969771744,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9528741193055105,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 133.53380727767944
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_05.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 5,
3
+ "drift_auroc": 0.9439126223777906,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9726526636296121,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9979646479961378,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.03619128465652466,
13
+ "cos_drift_correctness": 0.0033732375595718622,
14
+ "cos_uncertainty_correctness": -0.001980707747861743,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.043339140713214874,
21
+ "std": 0.0970807820558548
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6173967719078064,
26
+ "std": 0.21449479460716248
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.021661318838596344,
31
+ "std": 0.061699919402599335
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.42544519901275635,
36
+ "std": 0.2802523374557495
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.8142341994268971,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9642101597055112,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.905951018952588,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.5180063121897573,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9405182567726738,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.7359760489825319,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.967296646088255,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.8937458171329394,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9624574404082189,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 144.82072019577026
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_06.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 6,
3
+ "drift_auroc": 0.9460011324345245,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9749156173477416,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9979849697082791,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.02005905658006668,
13
+ "cos_drift_correctness": -0.008644387125968933,
14
+ "cos_uncertainty_correctness": 0.035370659083127975,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.03948484733700752,
21
+ "std": 0.09584105759859085
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6535050868988037,
26
+ "std": 0.21575653553009033
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.020458266139030457,
31
+ "std": 0.05958631634712219
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.449584424495697,
36
+ "std": 0.2899908125400543
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.8088702147525676,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9679368025155073,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9193024701886645,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.5090182209869011,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9658421672555947,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.8011960501830843,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9661937414455942,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.8923783272065241,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9598720056148711,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 144.84658360481262
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_07.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 7,
3
+ "drift_auroc": 0.9486147092727818,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9779143750837568,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9976602817614224,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.030850570648908615,
13
+ "cos_drift_correctness": 0.026000214740633965,
14
+ "cos_uncertainty_correctness": 0.027471689507365227,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.04007749259471893,
21
+ "std": 0.09756413847208023
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6484773755073547,
26
+ "std": 0.21335306763648987
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.02045559510588646,
31
+ "std": 0.058983251452445984
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.4480625092983246,
36
+ "std": 0.2810366153717041
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.811491033194887,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9661582571409685,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9180203386302509,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.5355230153887872,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9517078916372202,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.7831952698241191,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9660191224328895,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.9027776550873549,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9633052911950717,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 143.7363555431366
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_08.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 8,
3
+ "drift_auroc": 0.9436369438142326,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9807194646000107,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9980536040821549,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.039751965552568436,
13
+ "cos_drift_correctness": -0.004410579800605774,
14
+ "cos_uncertainty_correctness": 0.045971233397722244,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.039205152541399,
21
+ "std": 0.09732519090175629
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6585108637809753,
26
+ "std": 0.20939327776432037
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.020373612642288208,
31
+ "std": 0.058575961738824844
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.4399964213371277,
36
+ "std": 0.2834493815898895
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.7786020155188512,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9747400974591364,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9201636655584751,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.6448093914536868,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.934040047114252,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.7335449306681072,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.969746209879359,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.8931768841147592,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9596029915273391,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 144.35505199432373
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_09.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 9,
3
+ "drift_auroc": 0.9478720128144871,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9828307119971361,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9982379131757501,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.02433849684894085,
13
+ "cos_drift_correctness": -0.025808092206716537,
14
+ "cos_uncertainty_correctness": 0.021662255749106407,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.03769109770655632,
21
+ "std": 0.0971510037779808
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6718498468399048,
26
+ "std": 0.21020126342773438
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.02007238380610943,
31
+ "std": 0.05859347805380821
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.44910070300102234,
36
+ "std": 0.28751757740974426
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.7936089378280048,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.978615622468682,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9264690810396488,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.7253105250868114,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9422850412249706,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.7607374392220422,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9689145730673178,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.9109617197467501,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9581572719954963,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 151.38029074668884
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_10.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 10,
3
+ "drift_auroc": 0.9496738294032827,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9843212838384111,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9980495750818347,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.025705965235829353,
13
+ "cos_drift_correctness": -0.003326516365632415,
14
+ "cos_uncertainty_correctness": 0.043761368840932846,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.037546705454587936,
21
+ "std": 0.09729484468698502
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6742916107177734,
26
+ "std": 0.21287156641483307
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.019854165613651276,
31
+ "std": 0.057867392897605896
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.44933104515075684,
36
+ "std": 0.2848002016544342
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.7984996297369523,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9788993767469486,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9292829894766189,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.7186141805828608,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9452296819787986,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.7308586949996997,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9713187082550228,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.912162192953195,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9617783722458123,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 147.01239705085754
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_11.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 11,
3
+ "drift_auroc": 0.9478580906085624,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9850862714869608,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9979276801510947,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.005517570301890373,
13
+ "cos_drift_correctness": -0.017856759950518608,
14
+ "cos_uncertainty_correctness": 0.03847955912351608,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.03860519826412201,
21
+ "std": 0.09878534823656082
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.664135754108429,
26
+ "std": 0.2145092636346817
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.02003399096429348,
31
+ "std": 0.057298652827739716
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.4452461898326874,
36
+ "std": 0.2879909873008728
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.8065778035352071,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9797034787524678,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9263910949148041,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.7518605141647647,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9575971731448764,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.722619905156372,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9714232593694972,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.9172479037815432,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9616165103652458,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 151.30345034599304
92
+ }
data/experiments/tier1_gemma2_v2/per_layer/layer_12.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "layer": 12,
3
+ "drift_auroc": 0.9473017250347686,
4
+ "drift_C": 0.1,
5
+ "drift_active_neurons": 3584,
6
+ "uncertainty_auroc": 0.9866268553564509,
7
+ "uncertainty_active_neurons": 3584,
8
+ "uncertainty_threshold": 0.5,
9
+ "correctness_auroc": 0.9980232805534291,
10
+ "n_correct": 14955,
11
+ "n_wrong": 131,
12
+ "cos_drift_uncertainty": -0.027162229642271996,
13
+ "cos_drift_correctness": -0.006445594131946564,
14
+ "cos_uncertainty_correctness": 0.007319244556128979,
15
+ "neuron_overlap_ratio": 1.0,
16
+ "neuron_overlap_note": "L2=all active. See sparse_neuron_analysis for L1.",
17
+ "cell_analysis": {
18
+ "A_confident_stable": {
19
+ "n": 8481,
20
+ "mean": 0.03960072249174118,
21
+ "std": 0.10105704516172409
22
+ },
23
+ "B_confident_drifted": {
24
+ "n": 954,
25
+ "mean": 0.6566126942634583,
26
+ "std": 0.21807824075222015
27
+ },
28
+ "C_uncertain_stable": {
29
+ "n": 5441,
30
+ "mean": 0.02034013159573078,
31
+ "std": 0.057039156556129456
32
+ },
33
+ "D_uncertain_drifted": {
34
+ "n": 210,
35
+ "mean": 0.4324892461299896,
36
+ "std": 0.28674644231796265
37
+ }
38
+ },
39
+ "per_relation": {
40
+ "holds_position": {
41
+ "auroc": 0.8117260697382401,
42
+ "n_drifted": 44,
43
+ "n_stable": 1479
44
+ },
45
+ "CEO": {
46
+ "auroc": 0.9780779100846232,
47
+ "n_drifted": 135,
48
+ "n_stable": 1163
49
+ },
50
+ "head_coach": {
51
+ "auroc": 0.9285790932775441,
52
+ "n_drifted": 372,
53
+ "n_stable": 2264
54
+ },
55
+ "works_for": {
56
+ "auroc": 0.7475463057118539,
57
+ "n_drifted": 7,
58
+ "n_stable": 893
59
+ },
60
+ "owned_by": {
61
+ "auroc": 1.0,
62
+ "n_drifted": 5,
63
+ "n_stable": 735
64
+ },
65
+ "member_of_party": {
66
+ "auroc": 0.9634864546525325,
67
+ "n_drifted": 6,
68
+ "n_stable": 849
69
+ },
70
+ "plays_for": {
71
+ "auroc": 0.7207665526142025,
72
+ "n_drifted": 25,
73
+ "n_stable": 1851
74
+ },
75
+ "head_of_state": {
76
+ "auroc": 0.9708467476737795,
77
+ "n_drifted": 223,
78
+ "n_stable": 1255
79
+ },
80
+ "chair_of": {
81
+ "auroc": 0.9255170630128197,
82
+ "n_drifted": 70,
83
+ "n_stable": 788
84
+ },
85
+ "head_of_government": {
86
+ "auroc": 0.9610409550531096,
87
+ "n_drifted": 277,
88
+ "n_stable": 1959
89
+ }
90
+ },
91
+ "elapsed_seconds": 163.91591429710388
92
+ }