# knowledge-drift-experiments / disentanglement_v2.1.py
# Uploaded via huggingface_hub (commit 14b2318, verified)
"""
Enhanced Drift vs Uncertainty Disentanglement Analysis β€” GPU Edition
=====================================================================
KEY FIX: All probe training now runs on GPU via PyTorch (not sklearn CPU).
Expected speedup: ~100x. All 28 layers in ~30 min instead of 420 hours.
Usage:
# Full run (all layers)
CUDA_VISIBLE_DEVICES=0 python disentanglement_v2.py \
--model Qwen/Qwen2.5-7B-Instruct \
--dataset data/tier1_qwen25.json \
--output_dir data/experiments/tier1_qwen25
# Resume from cached states (skip extraction, GPU only for probing)
CUDA_VISIBLE_DEVICES=0 python disentanglement_v2.py \
--model Qwen/Qwen2.5-7B-Instruct \
--dataset data/tier1_qwen25.json \
--output_dir data/experiments/tier1_qwen25 \
--skip_extraction
# Specific layers only
CUDA_VISIBLE_DEVICES=0 python disentanglement_v2.py \
--model Qwen/Qwen2.5-7B-Instruct \
--dataset data/tier1_qwen25.json \
--output_dir data/experiments/tier1_qwen25 \
--skip_extraction \
--layers 20 21 22 23 24 25 26 27
"""
import argparse
import json
import os
import sys
import logging
import warnings
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
warnings.filterwarnings("ignore")
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[logging.StreamHandler()],
)
logger = logging.getLogger(__name__)
# ============================================================================
# GPU PROBE β€” replaces sklearn LogisticRegression entirely
# ============================================================================
class LinearProbeGPU:
    """
    L2-regularized logistic regression trained fully on GPU via PyTorch.
    Drop-in replacement for sklearn LogisticRegression.
    coef_[0] is accessible for cosine similarity / neuron analysis.
    """
    def __init__(self, input_dim, C=1.0, lr=0.05, max_iter=500, device="cuda"):
        # C follows the sklearn convention: smaller C = stronger regularization.
        self.C = C
        self.lr = lr
        self.max_iter = max_iter
        self.device = device
        self.model = nn.Linear(input_dim, 1, bias=True).to(device)
        self.coef_ = None  # filled after fit
    def fit(self, X_t, y_t):
        """Fit on X_t, y_t (torch tensors already on self.device). Returns self."""
        # Deterministic zero init so repeated fits on the same data are reproducible.
        nn.init.zeros_(self.model.weight)
        nn.init.zeros_(self.model.bias)
        # sklearn minimizes 0.5*||w||^2 + C*sum(loss); dividing through by C*N
        # gives mean-loss + (1/(2*C*N))*||w||^2, so the explicit penalty
        # coefficient needs the 0.5 factor (the previous 1/(C*N) doubled the
        # effective regularization strength relative to sklearn).
        wd = 0.5 / (self.C * len(y_t) + 1e-8)
        optimizer = torch.optim.LBFGS(
            self.model.parameters(), lr=self.lr, max_iter=self.max_iter,
            tolerance_grad=1e-5, tolerance_change=1e-7
        )
        criterion = nn.BCEWithLogitsLoss()
        def closure():
            # LBFGS re-evaluates the loss multiple times per step via this closure.
            optimizer.zero_grad()
            logits = self.model(X_t).squeeze(1)
            loss = criterion(logits, y_t)
            # L2 regularization on weights only (bias unpenalized, as in sklearn)
            loss = loss + wd * self.model.weight.pow(2).sum()
            loss.backward()
            return loss
        optimizer.step(closure)
        self.coef_ = [self.model.weight.detach().cpu().numpy().flatten()]
        return self
    def predict_proba(self, X_t):
        """X_t is torch tensor on self.device. Returns numpy (N,2) of [P(y=0), P(y=1)]."""
        with torch.no_grad():
            logits = self.model(X_t).squeeze(1)
            probs = torch.sigmoid(logits).cpu().numpy()
        return np.column_stack([1 - probs, probs])
def prepare_tensors(X_np, y_np, device):
    """Sanitize and z-score the features on CPU, then move tensors to `device`.

    Returns (X_t, y_t, mean, std) where mean/std are the per-feature
    normalization statistics (numpy, shape (1, D)) used for later inference.
    """
    # Replace NaN/inf and clamp extreme fp16-origin values before scaling.
    feats = np.nan_to_num(X_np.astype(np.float32), nan=0.0, posinf=1e4, neginf=-1e4)
    feats = np.clip(feats, -1e4, 1e4)
    # StandardScaler-style normalization computed on CPU.
    mean = feats.mean(axis=0, keepdims=True)
    std = feats.std(axis=0, keepdims=True) + 1e-8
    X_t = torch.tensor((feats - mean) / std, dtype=torch.float32, device=device)
    y_t = torch.tensor(y_np.astype(np.float32), device=device)
    return X_t, y_t, mean, std
def train_probe_gpu(X_np, y_np, C=1.0, max_iter=500, device="cuda", n_splits=3):
    """
    Full GPU probe training with stratified k-fold CV.
    Returns (ProbeWrapper, cv_auroc) — the wrapper bakes in the training-time
    feature mean/std so downstream code can call .predict_proba(X_np) on raw
    numpy arrays. cv_auroc is the mean held-out AUROC across folds, or 0.5
    (chance) when there were too few minority-class samples to cross-validate.
    """
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import roc_auc_score
    X_t, y_t, mean, std = prepare_tensors(X_np, y_np, device)
    input_dim = X_t.shape[1]
    # Cap fold count by the minority-class size so StratifiedKFold stays valid.
    min_class = min(int((y_np == 0).sum()), int((y_np == 1).sum()))
    n_splits_actual = min(n_splits, min_class)
    aurocs = []
    if n_splits_actual >= 2:
        skf = StratifiedKFold(n_splits=n_splits_actual, shuffle=True, random_state=42)
        for train_idx, val_idx in skf.split(X_np, y_np):
            probe = LinearProbeGPU(input_dim, C=C, max_iter=max_iter, device=device)
            probe.fit(X_t[train_idx], y_t[train_idx])
            probs = probe.predict_proba(X_t[val_idx])[:, 1]
            y_val = y_np[val_idx]
            # AUROC is undefined on a single-class validation fold; skip those.
            if len(np.unique(y_val)) > 1:
                aurocs.append(roc_auc_score(y_val, probs))
    # Final probe on full data
    probe_final = LinearProbeGPU(input_dim, C=C, max_iter=max_iter, device=device)
    probe_final.fit(X_t, y_t)
    cv_auroc = float(np.mean(aurocs)) if aurocs else 0.5
    # Wrap so downstream code can call .predict_proba(X_np)
    class ProbeWrapper:
        # Re-applies the training-time sanitize + z-score before delegating
        # to the underlying GPU probe.
        def __init__(self, probe, mean, std, device):
            self._probe = probe
            self._mean = mean
            self._std = std
            self._device = device
            self.coef_ = probe.coef_
        def predict_proba(self, X_np_in):
            X = X_np_in.astype(np.float32)
            X = np.nan_to_num(X, nan=0.0, posinf=1e4, neginf=-1e4)
            X = np.clip(X, -1e4, 1e4)
            X_scaled = (X - self._mean) / self._std
            X_t = torch.tensor(X_scaled, dtype=torch.float32, device=self._device)
            return self._probe.predict_proba(X_t)
    return ProbeWrapper(probe_final, mean, std, device), cv_auroc
def best_probe_gpu(X_np, y_np, C_list, max_iter=500, device="cuda", n_splits=3):
    """Grid search over C values; return (best_probe, best_auroc, best_C).

    The earliest C in `C_list` wins ties. A probe is always returned, even
    when every candidate scores 0.0 AUROC — the previous strict
    `auroc > best_auroc` gate (seeded with 0.0) could hand back
    `best_probe=None` in that degenerate case.
    """
    best_auroc, best_probe = 0.0, None
    best_C = C_list[0]
    for C_val in C_list:
        probe, auroc = train_probe_gpu(X_np, y_np, C=C_val,
                                       max_iter=max_iter, device=device,
                                       n_splits=n_splits)
        # Keep the first probe unconditionally so we never return None.
        if best_probe is None or auroc > best_auroc:
            best_auroc = auroc
            best_probe = probe
            best_C = C_val
    return best_probe, best_auroc, best_C
def cosine_sim(w1, w2):
    """Cosine similarity of two weight vectors; 0.0 when either has zero norm."""
    denom = np.linalg.norm(w1) * np.linalg.norm(w2)
    if not denom:
        return 0.0
    return float(np.dot(w1, w2) / denom)
# ============================================================================
# MODEL LOADING
# ============================================================================
def load_model(model_name, device="auto"):
    """Load a causal LM and tokenizer in fp16 with hidden-state outputs enabled.

    Tries HF `device_map` placement first; on any failure falls back to a
    plain load followed by a manual `.cuda()` move when CUDA is present.
    Returns (model, tokenizer) with the model in eval mode.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer
    logger.info(f"Loading model: {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        # Some chat models ship without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token
    shared_kwargs = {
        "trust_remote_code": True,
        "output_hidden_states": True,
        "torch_dtype": torch.float16,
    }
    try:
        model = AutoModelForCausalLM.from_pretrained(
            model_name, device_map=device, **shared_kwargs)
    except Exception as exc:
        logger.warning(f"device_map failed ({exc}). Loading to cuda manually...")
        model = AutoModelForCausalLM.from_pretrained(model_name, **shared_kwargs)
        if torch.cuda.is_available():
            model = model.cuda()
    model.eval()
    logger.info(f"Model loaded. Layers: {model.config.num_hidden_layers}, "
                f"Hidden dim: {model.config.hidden_size}")
    return model, tokenizer
# ============================================================================
# HIDDEN STATE EXTRACTION
# ============================================================================
def extract_hidden_states(model, tokenizer, samples, output_dir, max_samples=None):
    """Run one forward pass per sample and cache last-token hidden states.

    For each sample: tokenize the query (truncated to 512 tokens), forward
    once with no grad, and record the final-position hidden state of every
    transformer layer plus next-token stats (top prob, decoded top token,
    entropy, and a loose substring-based correctness flag). Results are
    pickled into `<output_dir>/cached_states.npz` for `--skip_extraction`.

    Returns the list of per-sample dicts.
    """
    if max_samples:
        samples = samples[:max_samples]
    num_layers = model.config.num_hidden_layers
    results = []
    logger.info(f"Extracting hidden states for {len(samples)} samples "
                f"across {num_layers} layers...")
    for idx, sample in enumerate(samples):
        # Field names vary across dataset versions; accept both spellings.
        query = sample.get("query", sample.get("question", ""))
        expected_answer = sample.get("expected_answer", sample.get("answer", ""))
        is_drifted = sample.get("is_drifted", False)
        category = sample.get("category", "unknown")
        relation = sample.get("relation", "unknown")
        inputs = tokenizer(query, return_tensors="pt",
                           truncation=True, max_length=512)
        inputs = {k: v.to(model.device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        hidden_states = {}
        for layer_idx in range(num_layers):
            # hidden_states[0] is the embedding output, so offset by +1; take
            # the last-token vector and sanitize fp16 overflows / NaNs.
            h = outputs.hidden_states[layer_idx + 1][0, -1, :].float().cpu()
            h = torch.clamp(h, -1e6, 1e6)
            h[torch.isnan(h)] = 0.0
            hidden_states[layer_idx] = h.numpy()
        # Next-token distribution at the final position.
        logits = outputs.logits[0, -1, :].float().cpu()
        logits = torch.clamp(logits, -1e4, 1e4)
        logits[torch.isnan(logits)] = 0.0
        probs = torch.softmax(logits, dim=-1)
        top_prob = probs.max().item()
        top_token = tokenizer.decode([probs.argmax().item()]).strip()
        entropy = -(probs * torch.log(probs + 1e-10)).sum().item()
        # Loose correctness proxy: case-insensitive substring match in either
        # direction between the greedy next token and the expected answer.
        top_answer_matches = (
            expected_answer.lower() in top_token.lower()
            or top_token.lower() in expected_answer.lower()
        )
        results.append({
            "idx": idx,
            "query": query,
            "expected_answer": expected_answer,
            "is_drifted": is_drifted,
            "category": category,
            "relation": relation,
            "hidden_states": hidden_states,
            "top_prob": top_prob,
            "top_token": top_token,
            "entropy": entropy,
            "top_answer_matches": top_answer_matches,
        })
        if (idx + 1) % 500 == 0:
            logger.info(f" Processed {idx + 1}/{len(samples)}")
    logger.info(f"Extraction complete. {len(results)} samples.")
    # Object-dtype array is pickled inside the npz; loading it later
    # requires allow_pickle=True (see main()).
    cache_path = os.path.join(output_dir, "cached_states.npz")
    np.savez_compressed(cache_path, results=np.array(results, dtype=object))
    logger.info(f"Cached to {cache_path}")
    # NOTE(review): this deletes only the local reference — the caller's
    # binding (if any) still keeps the model alive.
    del model
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return results
# ============================================================================
# PER-LAYER ANALYSIS
# ============================================================================
def analyze_single_layer(layer, results, confidence_threshold=0.5,
                         probe_device="cuda", C_list=None, max_iter=500):
    """Train drift / uncertainty / correctness probes at one layer.

    Args:
        layer: layer index into each result's `hidden_states` dict.
        results: per-sample dicts produced by `extract_hidden_states`.
        confidence_threshold: top-prob cutoff separating confident/uncertain.
        probe_device: device string for GPU probe training.
        C_list: inverse-regularization grid; defaults to [0.01, 0.1, 1.0].
        max_iter: LBFGS iterations per probe fit.

    Returns a metrics dict (AUROCs, 3-way cosine similarities, 2x2 factorial
    cells, per-relation AUROCs, timings) plus the raw probe weight vectors,
    which the caller strips before JSON serialization.
    """
    import time
    from sklearn.metrics import roc_auc_score
    if C_list is None:
        C_list = [0.01, 0.1, 1.0]  # 3 values is enough; grid search on GPU is fast
    t0 = time.time()
    drifted = [r for r in results if r["is_drifted"]]
    non_drifted = [r for r in results if not r["is_drifted"]]
    # ── Drift probe ──────────────────────────────────────────────────────────
    X_all = np.array([r["hidden_states"][layer] for r in results])
    y_drift = np.array([1 if r["is_drifted"] else 0 for r in results])
    drift_probe, best_drift_auroc, best_C = best_probe_gpu(
        X_all, y_drift, C_list=C_list, max_iter=max_iter, device=probe_device)
    w_drift = drift_probe.coef_[0]
    # NOTE(review): the probe is L2-regularized, so weights are almost never
    # exactly 0 — this "active" count is typically the full hidden dimension.
    n_active_drift = int(np.sum(w_drift != 0))
    # ── Uncertainty probe (trained on non-drifted only) ───────────────────
    actual_threshold = confidence_threshold
    unc_labels = np.array(
        [0 if r["top_prob"] >= confidence_threshold else 1 for r in non_drifted])
    # Fall back to a median split when either class has < 5 samples at the
    # fixed threshold, so the probe still sees both classes.
    if unc_labels.sum() < 5 or (len(unc_labels) - unc_labels.sum()) < 5:
        median_conf = float(np.median([r["top_prob"] for r in non_drifted]))
        unc_labels = np.array(
            [0 if r["top_prob"] >= median_conf else 1 for r in non_drifted])
        actual_threshold = median_conf
    X_unc = np.array([r["hidden_states"][layer] for r in non_drifted])
    unc_probe, best_unc_auroc, _ = best_probe_gpu(
        X_unc, unc_labels, C_list=C_list, max_iter=max_iter, device=probe_device)
    w_unc = unc_probe.coef_[0]
    n_active_unc = int(np.sum(w_unc != 0))
    # ── Correctness probe ────────────────────────────────────────────────
    y_correct = np.array([1 if r["top_answer_matches"] else 0 for r in results])
    n_correct = int(y_correct.sum())
    n_wrong = int((1 - y_correct).sum())
    # Zero vector / chance AUROC defaults when a class is too small to train.
    w_corr = np.zeros_like(w_drift)
    best_corr_auroc = 0.5
    if n_correct >= 5 and n_wrong >= 5:
        corr_probe, best_corr_auroc, _ = best_probe_gpu(
            X_all, y_correct, C_list=C_list, max_iter=max_iter, device=probe_device)
        w_corr = corr_probe.coef_[0]
    # ── 3-way cosine similarities ────────────────────────────────────────
    cos_drift_unc = cosine_sim(w_drift, w_unc)
    cos_drift_corr = cosine_sim(w_drift, w_corr)
    cos_unc_corr = cosine_sim(w_unc, w_corr)
    # ── 2×2 factorial: score each (confidence × drift) cell with the drift probe
    ct = actual_threshold
    cells = {}
    for cell_name, cell_samples in [
        ("A_confident_stable", [r for r in non_drifted if r["top_prob"] >= ct]),
        ("B_confident_drifted", [r for r in drifted if r["top_prob"] >= ct]),
        ("C_uncertain_stable", [r for r in non_drifted if r["top_prob"] < ct]),
        ("D_uncertain_drifted", [r for r in drifted if r["top_prob"] < ct]),
    ]:
        if cell_samples:
            X_cell = np.array([r["hidden_states"][layer] for r in cell_samples])
            probs = drift_probe.predict_proba(X_cell)[:, 1]
            cells[cell_name] = {
                "n": len(cell_samples),
                "mean": float(np.mean(probs)),
                "std": float(np.std(probs)),
            }
    # ── Per-relation AUROC ────────────────────────────────────────────────
    per_relation = {}
    for rel in set(r["relation"] for r in results):
        rel_samples = [r for r in results if r["relation"] == rel]
        rel_drifted = sum(1 for r in rel_samples if r["is_drifted"])
        rel_stable = len(rel_samples) - rel_drifted
        if rel_drifted >= 5 and rel_stable >= 5:
            X_rel = np.array([r["hidden_states"][layer] for r in rel_samples])
            y_rel = np.array([1 if r["is_drifted"] else 0 for r in rel_samples])
            # Reuse the best C found on the full data instead of re-searching.
            rp, rel_auroc, _ = best_probe_gpu(
                X_rel, y_rel, C_list=[best_C], max_iter=max_iter, device=probe_device)
            per_relation[rel] = {
                "auroc": rel_auroc,
                "n_drifted": rel_drifted,
                "n_stable": rel_stable,
            }
    # ── Neuron overlap (Jaccard over nonzero-weight indices) ──────────────
    drift_neurons = set(np.where(w_drift != 0)[0])
    unc_neurons = set(np.where(w_unc != 0)[0])
    overlap = drift_neurons & unc_neurons
    overlap_ratio = len(overlap) / max(len(drift_neurons | unc_neurons), 1)
    elapsed = time.time() - t0
    return {
        "layer": layer,
        "drift_auroc": best_drift_auroc,
        "drift_C": best_C,
        "drift_active_neurons": n_active_drift,
        "uncertainty_auroc": best_unc_auroc,
        "uncertainty_active_neurons": n_active_unc,
        "uncertainty_threshold": actual_threshold,
        "correctness_auroc": best_corr_auroc,
        "n_correct": n_correct,
        "n_wrong": n_wrong,
        "cos_drift_uncertainty": cos_drift_unc,
        "cos_drift_correctness": cos_drift_corr,
        "cos_uncertainty_correctness": cos_unc_corr,
        "neuron_overlap_ratio": overlap_ratio,
        "cell_analysis": cells,
        "per_relation": per_relation,
        "elapsed_seconds": elapsed,
        # weight vectors kept in memory for figures, stripped before JSON save
        "w_drift": w_drift,
        "w_unc": w_unc,
        "w_corr": w_corr,
    }
# ============================================================================
# FIGURE GENERATION
# ============================================================================
def save_layer_figures(all_layer_results, output_dir, results=None):
    """Render the summary figure set into `<output_dir>/figures/`.

    Figures: (1) per-layer AUROC / cosine / neuron dashboard, (2) per-relation
    AUROC bars at the best layer, (3) 2x2 factorial cell bars, (4) PCA and
    probe-direction projections (needs `results` plus cached weight vectors on
    the best layer), (5) 3x3 cosine similarity matrix. No-op when fewer than
    two layers have been analyzed.
    """
    import matplotlib
    matplotlib.use("Agg")  # headless backend — no display required
    import matplotlib.pyplot as plt
    os.makedirs(os.path.join(output_dir, "figures"), exist_ok=True)
    layers = sorted(all_layer_results.keys())
    if len(layers) < 2:
        return
    # Fig 1 — Dashboard (AUROC curves, cosine curves, neuron sparsity/overlap)
    fig, axes = plt.subplots(1, 3, figsize=(20, 6))
    ax = axes[0]
    ax.plot(layers, [all_layer_results[l]["drift_auroc"] for l in layers],
            "o-", color="#e74c3c", lw=2, ms=6, label="Drift probe")
    ax.plot(layers, [all_layer_results[l]["uncertainty_auroc"] for l in layers],
            "s--", color="#3498db", lw=2, ms=6, label="Uncertainty probe")
    ax.plot(layers, [all_layer_results[l]["correctness_auroc"] for l in layers],
            "^:", color="#2ecc71", lw=2, ms=6, label="Correctness probe")
    ax.set(xlabel="Layer", ylabel="AUROC",
           title="Probe AUROC by Layer", ylim=(0.4, 1.05))
    ax.legend(fontsize=11); ax.grid(alpha=0.3)
    ax = axes[1]
    ax.plot(layers, [abs(all_layer_results[l]["cos_drift_uncertainty"]) for l in layers],
            "o-", color="#e74c3c", lw=2, label="|cos(drift, unc)|")
    ax.plot(layers, [abs(all_layer_results[l]["cos_drift_correctness"]) for l in layers],
            "s-", color="#2ecc71", lw=2, label="|cos(drift, corr)|")
    ax.plot(layers, [abs(all_layer_results[l]["cos_uncertainty_correctness"]) for l in layers],
            "^-", color="#3498db", lw=2, label="|cos(unc, corr)|")
    ax.axhline(0.3, color="gray", ls="--", alpha=0.5, label="Threshold")
    ax.set(xlabel="Layer", ylabel="|Cosine Similarity|",
           title="3-Way Disentanglement", ylim=(0, 1.0))
    ax.legend(fontsize=10); ax.grid(alpha=0.3)
    ax = axes[2]
    ax.plot(layers, [all_layer_results[l]["drift_active_neurons"] for l in layers],
            "o-", color="#e74c3c", lw=2, label="Drift neurons")
    ax.plot(layers, [all_layer_results[l]["uncertainty_active_neurons"] for l in layers],
            "s-", color="#3498db", lw=2, label="Uncertainty neurons")
    # Overlap ratio lives on a secondary y-axis (different scale, 0-1).
    ax2 = ax.twinx()
    ax2.plot(layers, [all_layer_results[l]["neuron_overlap_ratio"] for l in layers],
             "D--", color="#9b59b6", lw=2, label="Overlap ratio")
    ax.set(xlabel="Layer", ylabel="Active Neurons", title="Neuron Sparsity & Overlap")
    ax2.set_ylabel("Overlap Ratio", color="#9b59b6")
    ax.legend(loc="upper left", fontsize=10)
    ax2.legend(loc="upper right", fontsize=10)
    ax.grid(alpha=0.3)
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, "figures", "fig1_dashboard.png"),
                dpi=300, bbox_inches="tight")
    plt.close()
    # Fig 2 — Per-relation bar chart at the layer with the best drift AUROC
    best_layer = max(layers, key=lambda l: all_layer_results[l]["drift_auroc"])
    per_rel = all_layer_results[best_layer].get("per_relation", {})
    if per_rel:
        sorted_rels = sorted(per_rel, key=lambda r: per_rel[r]["auroc"], reverse=True)
        fig, ax = plt.subplots(figsize=(12, max(6, len(sorted_rels) * 0.5)))
        aurocs = [per_rel[r]["auroc"] for r in sorted_rels]
        # Green/amber/red coding at 0.8 / 0.6 AUROC cutoffs.
        colors = ["#2ecc71" if a > 0.8 else ("#f39c12" if a > 0.6 else "#e74c3c")
                  for a in aurocs]
        ax.barh(range(len(sorted_rels)), aurocs, color=colors,
                edgecolor="black", lw=0.5)
        for i, r in enumerate(sorted_rels):
            n = per_rel[r]["n_drifted"] + per_rel[r]["n_stable"]
            ax.text(aurocs[i] + 0.01, i, f"n={n}", va="center", fontsize=9)
        ax.set_yticks(range(len(sorted_rels)))
        ax.set_yticklabels(sorted_rels, fontsize=11)
        ax.set(xlabel="AUROC",
               title=f"Drift Detection by Relation (Layer {best_layer})",
               xlim=(0.3, 1.05))
        ax.axvline(0.5, color="gray", ls="--", alpha=0.5)
        ax.grid(alpha=0.3, axis="x")
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, "figures", "fig2_per_relation.png"),
                    dpi=300, bbox_inches="tight")
        plt.close()
    # Fig 3 — 2×2 cell analysis (mean drift-probe score per cell)
    cells = all_layer_results[best_layer].get("cell_analysis", {})
    if cells:
        fig, ax = plt.subplots(figsize=(10, 6))
        cnames = list(cells.keys())
        means = [cells[c]["mean"] for c in cnames]
        stds = [cells[c]["std"] for c in cnames]
        ns = [cells[c]["n"] for c in cnames]
        clrs = ["#3498db", "#e74c3c", "#95a5a6", "#e67e22"]
        ax.bar(range(len(cnames)), means, yerr=stds, capsize=5,
               color=clrs[:len(cnames)], edgecolor="black", lw=0.5)
        ax.set_xticks(range(len(cnames)))
        # chr(10) = newline: wrap the underscore-joined cell names for display.
        ax.set_xticklabels(
            [f"{c.replace('_', chr(10))}\n(n={ns[i]})" for i, c in enumerate(cnames)],
            fontsize=10)
        ax.set(ylabel="Mean Drift Probe Score",
               title=f"2×2 Factorial Analysis (Layer {best_layer})")
        ax.axhline(0.5, color="gray", ls="--", alpha=0.5, label="Chance")
        ax.legend(); ax.grid(alpha=0.3, axis="y")
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, "figures", "fig3_2x2_cells.png"),
                    dpi=300, bbox_inches="tight")
        plt.close()
    # Fig 4 — PCA scatter + projection onto the drift/uncertainty probe directions
    if results is not None and "w_drift" in all_layer_results[best_layer]:
        from sklearn.decomposition import PCA
        X_best = np.array([r["hidden_states"][best_layer] for r in results])
        is_d = np.array([r["is_drifted"] for r in results])
        pca = PCA(n_components=2)
        X_2d = pca.fit_transform(X_best)
        fig, axes = plt.subplots(1, 2, figsize=(16, 7))
        ax = axes[0]
        ax.scatter(X_2d[~is_d, 0], X_2d[~is_d, 1], c="#3498db", alpha=0.4,
                   s=30, label="Stable", edgecolors="white", lw=0.3)
        ax.scatter(X_2d[is_d, 0], X_2d[is_d, 1], c="#e74c3c", alpha=0.6,
                   s=50, label="Drifted", edgecolors="black", lw=0.3, marker="*")
        ax.set(xlabel=f"PC1 ({pca.explained_variance_ratio_[0]:.1%})",
               ylabel=f"PC2 ({pca.explained_variance_ratio_[1]:.1%})",
               title=f"Layer {best_layer}: Drift Status")
        ax.legend(fontsize=11); ax.grid(alpha=0.2)
        ax = axes[1]
        # Project standardized states onto the unit drift / uncertainty directions.
        w_d = all_layer_results[best_layer]["w_drift"]
        w_u = all_layer_results[best_layer]["w_unc"]
        Xsc = (X_best - X_best.mean(0)) / (X_best.std(0) + 1e-8)
        pd_ = Xsc @ (w_d / (np.linalg.norm(w_d) + 1e-8))
        pu_ = Xsc @ (w_u / (np.linalg.norm(w_u) + 1e-8))
        ax.scatter(pd_[~is_d], pu_[~is_d], c="#3498db", alpha=0.4, s=30,
                   label="Stable", edgecolors="white", lw=0.3)
        ax.scatter(pd_[is_d], pu_[is_d], c="#e74c3c", alpha=0.6, s=50,
                   label="Drifted", edgecolors="black", lw=0.3, marker="*")
        cos_v = all_layer_results[best_layer]["cos_drift_uncertainty"]
        ax.set(xlabel="Drift Direction", ylabel="Uncertainty Direction",
               title=f"Drift vs Uncertainty (cos={cos_v:.3f})")
        ax.legend(fontsize=11)
        ax.axhline(0, color="gray", ls="--", alpha=0.3)
        ax.axvline(0, color="gray", ls="--", alpha=0.3)
        ax.grid(alpha=0.2)
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, "figures", "fig4_pca_projections.png"),
                    dpi=300, bbox_inches="tight")
        plt.close()
    # Fig 5 — Cosine similarity matrix (symmetric 3x3 over the three probes)
    fig, ax = plt.subplots(figsize=(7, 6))
    bl = best_layer
    matrix = np.array([
        [1.0, all_layer_results[bl]["cos_drift_uncertainty"],
         all_layer_results[bl]["cos_drift_correctness"]],
        [all_layer_results[bl]["cos_drift_uncertainty"], 1.0,
         all_layer_results[bl]["cos_uncertainty_correctness"]],
        [all_layer_results[bl]["cos_drift_correctness"],
         all_layer_results[bl]["cos_uncertainty_correctness"], 1.0],
    ])
    im = ax.imshow(matrix, cmap="RdBu_r", vmin=-1, vmax=1)
    ax.set_xticks(range(3)); ax.set_yticks(range(3))
    labels = ["Drift", "Uncertainty", "Correctness"]
    ax.set_xticklabels(labels, fontsize=12)
    ax.set_yticklabels(labels, fontsize=12)
    for i in range(3):
        for j in range(3):
            # White text on strongly-colored cells for contrast.
            c = "white" if abs(matrix[i, j]) > 0.5 else "black"
            ax.text(j, i, f"{matrix[i,j]:.3f}", ha="center", va="center",
                    fontsize=14, fontweight="bold", color=c)
    ax.set_title(f"3-Way Cosine Similarity Matrix (Layer {bl})",
                 fontsize=14, fontweight="bold")
    plt.colorbar(im, ax=ax, shrink=0.8)
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, "figures", "fig5_cosine_matrix.png"),
                dpi=300, bbox_inches="tight")
    plt.close()
    logger.info(f"Figures saved → {output_dir}/figures/")
# ============================================================================
# PERMUTATION TEST β€” GPU
# ============================================================================
def run_permutation_test(results, best_layer, n_permutations=1000,
                         probe_device="cuda"):
    """Label-permutation significance test for the drift probe at one layer.

    Trains a probe on the true drift labels, then on `n_permutations`
    shuffled copies; the p-value is the fraction of null AUROCs that reach
    the true AUROC. (Removed an unused sklearn roc_auc_score import — AUROC
    comes from train_probe_gpu's internal CV.)

    Returns a dict with true AUROC, null mean/std, p-value and n.
    """
    logger.info(f"Permutation test ({n_permutations} iters) at layer {best_layer}…")
    X = np.array([r["hidden_states"][best_layer] for r in results])
    y = np.array([1 if r["is_drifted"] else 0 for r in results])
    true_probe, true_auroc = train_probe_gpu(X, y, C=0.1, max_iter=500,
                                             device=probe_device, n_splits=3)
    logger.info(f" True AUROC: {true_auroc:.4f}")
    null_aurocs = []
    for i in range(n_permutations):
        # Shuffling destroys any real signal; fewer LBFGS iters / folds keep
        # the null distribution cheap to estimate.
        y_shuf = np.random.permutation(y)
        p, a = train_probe_gpu(X, y_shuf, C=0.1, max_iter=200,
                               device=probe_device, n_splits=2)
        null_aurocs.append(a)
        if (i + 1) % 200 == 0:
            logger.info(f" Permutation {i+1}/{n_permutations}")
    null_aurocs = np.array(null_aurocs)
    p_value = float(np.mean(null_aurocs >= true_auroc))
    logger.info(f" Null mean: {null_aurocs.mean():.4f}, p={p_value:.6f}")
    return {
        "true_auroc": float(true_auroc),
        "null_mean": float(null_aurocs.mean()),
        "null_std": float(null_aurocs.std()),
        "p_value": p_value,
        "n": n_permutations,
    }
# ============================================================================
# SPARSITY CURVE β€” GPU
# ============================================================================
def run_sparsity_analysis(results, best_layer, probe_device="cuda"):
    """Sweep the C grid at one layer; report AUROC vs. 'active' neuron count.

    Returns a list of {"C", "n_active", "auroc"} dicts, one per grid point.
    """
    logger.info(f"Sparsity analysis at layer {best_layer}…")
    X = np.array([r["hidden_states"][best_layer] for r in results])
    y = np.array([1 if r["is_drifted"] else 0 for r in results])
    out = []
    # Note: GPU probe uses L2, not L1, so sparsity comes from small weights
    # We threshold small weights to count "active" neurons
    # NOTE(review): thresholding at the median |w| makes n_active ~ dim/2 for
    # every C by construction, so this curve is essentially flat in n_active.
    # A fixed absolute threshold or an L1 probe would measure real sparsity.
    for C_val in [0.0001, 0.001, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0]:
        probe, auroc = train_probe_gpu(X, y, C=C_val, max_iter=500,
                                       device=probe_device, n_splits=3)
        w = probe.coef_[0]
        thresh = np.percentile(np.abs(w), 50)  # top 50% magnitude = "active"
        n_act = int(np.sum(np.abs(w) > thresh))
        out.append({"C": C_val, "n_active": n_act, "auroc": float(auroc)})
        logger.info(f" C={C_val:>8.4f}: {n_act:>5d} neurons, AUROC={auroc:.4f}")
    return out
# ============================================================================
# MAIN
# ============================================================================
def main():
    """CLI entry point for the full disentanglement pipeline.

    Stages: parse args -> load dataset and normalize drift labels ->
    extract hidden states (or reuse the npz cache) -> per-layer probe
    analysis with resume support -> permutation test and sparsity sweep at
    the best layer -> final JSON dump and console summary.
    """
    parser = argparse.ArgumentParser(description="Disentanglement Analysis v2 — GPU Edition")
    parser.add_argument("--model", default="Qwen/Qwen2.5-7B-Instruct")
    parser.add_argument("--dataset", default="data/tier1_qwen25.json")
    parser.add_argument("--output_dir", default="data/experiments/tier1_qwen25")
    parser.add_argument("--device", default="auto")
    parser.add_argument("--confidence_threshold", type=float, default=0.5)
    parser.add_argument("--layers", type=int, nargs="+", default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--skip_extraction", action="store_true")
    parser.add_argument("--n_permutations", type=int, default=1000)
    parser.add_argument("--max_iter", type=int, default=500,
                        help="LBFGS iterations for probe training")
    args = parser.parse_args()
    # Probe device: prefer cuda:0 if available
    probe_device = "cuda:0" if torch.cuda.is_available() else "cpu"
    logger.info(f"Probe training device: {probe_device}")
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, "figures"), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, "per_layer"), exist_ok=True)
    # Mirror console logging into the experiment directory.
    fh = logging.FileHandler(os.path.join(args.output_dir, "experiment.log"))
    fh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.addHandler(fh)
    print("\n" + "=" * 70)
    print(" DISENTANGLEMENT ANALYSIS v2 — GPU EDITION")
    print("=" * 70)
    print(f" Model: {args.model}")
    print(f" Dataset: {args.dataset}")
    print(f" Output: {args.output_dir}")
    print(f" Probe device: {probe_device}")
    print(f" Layers: {args.layers or 'all'}")
    print("=" * 70 + "\n")
    # ── Load dataset ─────────────────────────────────────────────────────────
    with open(args.dataset) as f:
        dataset = json.load(f)
    samples = dataset["samples"] if "samples" in dataset else dataset
    logger.info(f"Loaded {len(samples)} samples")
    # Normalize the drift flag to bool: datasets may store it as a string or
    # bool, under either `is_drifted_query` or `is_drifted`.
    for s in samples:
        raw = s.get("is_drifted_query", s.get("is_drifted", False))
        s["is_drifted"] = raw.lower() in ("true", "1", "yes") \
            if isinstance(raw, str) else bool(raw)
    # Only post-cutoff queries are analyzed for drift.
    samples = [s for s in samples if str(s.get("temporal_zone", "")) == "post_cutoff"]
    logger.info(f"Post-cutoff samples: {len(samples)}")
    n_drifted = sum(1 for s in samples if s["is_drifted"])
    logger.info(f"Drifted: {n_drifted}, Stable: {len(samples) - n_drifted}")
    if n_drifted == 0:
        logger.error("NO DRIFTED SAMPLES — check temporal_zone / is_drifted fields")
        sys.exit(1)
    # ── Extract or load cache ─────────────────────────────────────────────
    cache_path = os.path.join(args.output_dir, "cached_states.npz")
    if args.skip_extraction and os.path.exists(cache_path):
        logger.info(f"Loading cached hidden states from {cache_path}")
        # allow_pickle is required: the cache is an object array of dicts.
        results = np.load(cache_path, allow_pickle=True)["results"].tolist()
        logger.info(f"Loaded {len(results)} cached results")
    else:
        model, tokenizer = load_model(args.model, args.device)
        results = extract_hidden_states(
            model, tokenizer, samples, args.output_dir, args.max_samples)
    # ── Determine layers ──────────────────────────────────────────────────────
    num_layers = max(max(r["hidden_states"].keys()) for r in results) + 1
    layers_to_run = args.layers or list(range(num_layers))
    logger.info(f"Will analyze {len(layers_to_run)} layers: {layers_to_run}")
    # ── Resume from previously completed layers ───────────────────────────────
    all_layer_results = {}
    existing_path = os.path.join(args.output_dir, "all_layer_results.json")
    if os.path.exists(existing_path):
        with open(existing_path) as f:
            saved = json.load(f)
        # JSON keys are strings; convert back to int layer indices.
        all_layer_results = {int(k): v for k, v in saved.items()}
        logger.info(f"Resumed: {len(all_layer_results)} layers already done")
    # ── Per-layer loop ────────────────────────────────────────────────────────
    for layer in layers_to_run:
        if layer in all_layer_results and "drift_auroc" in all_layer_results[layer]:
            logger.info(f"Layer {layer}: already done "
                        f"(AUROC={all_layer_results[layer]['drift_auroc']:.4f}), skip")
            continue
        logger.info(f"\n{'='*60}")
        logger.info(f" LAYER {layer}/{num_layers - 1}")
        logger.info(f"{'='*60}")
        res = analyze_single_layer(
            layer, results,
            confidence_threshold=args.confidence_threshold,
            probe_device=probe_device,
            max_iter=args.max_iter,
        )
        logger.info(f" Drift AUROC: {res['drift_auroc']:.4f}")
        logger.info(f" Uncertainty AUROC: {res['uncertainty_auroc']:.4f}")
        logger.info(f" Correctness AUROC: {res['correctness_auroc']:.4f}")
        logger.info(f" cos(drift,unc): {res['cos_drift_uncertainty']:.4f}")
        logger.info(f" cos(drift,corr): {res['cos_drift_correctness']:.4f}")
        logger.info(f" cos(unc,corr): {res['cos_uncertainty_correctness']:.4f}")
        logger.info(f" Active neurons: {res['drift_active_neurons']} drift, "
                    f"{res['uncertainty_active_neurons']} unc")
        logger.info(f" Neuron overlap: {res['neuron_overlap_ratio']:.2%}")
        logger.info(f" Time: {res['elapsed_seconds']:.1f}s")
        for cell, vals in res.get("cell_analysis", {}).items():
            logger.info(f" {cell}: n={vals['n']}, "
                        f"drift_score={vals['mean']:.3f}±{vals['std']:.3f}")
        if res.get("per_relation"):
            for rel, rv in sorted(res["per_relation"].items(),
                                  key=lambda x: -x[1]["auroc"]):
                logger.info(f" {rel:30s}: {rv['auroc']:.4f} "
                            f"(n={rv['n_drifted']}+{rv['n_stable']})")
        # Strip weight vectors before JSON save
        save_res = {k: v for k, v in res.items()
                    if k not in ("w_drift", "w_unc", "w_corr")}
        all_layer_results[layer] = save_res
        # Persist both the per-layer file and the rolling resume file
        # after every layer so an interrupted run loses at most one layer.
        with open(os.path.join(args.output_dir, "per_layer",
                               f"layer_{layer:02d}.json"), "w") as f:
            json.dump(save_res, f, indent=2, default=str)
        with open(existing_path, "w") as f:
            json.dump(all_layer_results, f, indent=2, default=str)
        # Temporarily put weight vectors back for figures
        all_layer_results[layer]["w_drift"] = res["w_drift"]
        all_layer_results[layer]["w_unc"] = res["w_unc"]
        all_layer_results[layer]["w_corr"] = res["w_corr"]
        save_layer_figures(all_layer_results, args.output_dir, results)
        all_layer_results[layer] = save_res  # strip again
        logger.info(f" Layer {layer} complete.")
    # ── Final analyses ────────────────────────────────────────────────────────
    best_layer = max(all_layer_results, key=lambda l: all_layer_results[l]["drift_auroc"])
    logger.info(f"\nBest layer: {best_layer}")
    perm = run_permutation_test(results, best_layer,
                                args.n_permutations, probe_device)
    sparsity = run_sparsity_analysis(results, best_layer, probe_device)
    final = {
        "model": args.model,
        "dataset": args.dataset,
        "n_samples": len(results),
        "n_drifted": sum(1 for r in results if r["is_drifted"]),
        "best_layer": int(best_layer),
        "layer_results": {int(k): v for k, v in all_layer_results.items()},
        "permutation_test": perm,
        "sparsity_curve": sparsity,
        "timestamp": datetime.now().isoformat(),
    }
    with open(os.path.join(args.output_dir, "final_results.json"), "w") as f:
        json.dump(final, f, indent=2, default=str)
    best = all_layer_results[best_layer]
    print("\n" + "=" * 70)
    print(" ★★★ FINAL RESULTS ★★★")
    print("=" * 70)
    print(f" Best layer: {best_layer}")
    print(f" Drift AUROC: {best['drift_auroc']:.4f}")
    print(f" Uncertainty AUROC: {best['uncertainty_auroc']:.4f}")
    print(f" Correctness AUROC: {best['correctness_auroc']:.4f}")
    print(f" cos(drift, unc): {best['cos_drift_uncertainty']:.4f}")
    print(f" cos(drift, corr): {best['cos_drift_correctness']:.4f}")
    print(f" cos(unc, corr): {best['cos_uncertainty_correctness']:.4f}")
    print(f" Permutation p-value: {perm['p_value']:.6f}")
    print(f" Active neurons: {best['drift_active_neurons']}")
    print()
    # NOTE(review): the 0.3 / 0.5 cosine cutoffs below look heuristic —
    # confirm against the write-up's separability criteria.
    if abs(best["cos_drift_uncertainty"]) < 0.3:
        print(" ✅ DRIFT AND UNCERTAINTY ARE GEOMETRICALLY SEPARABLE")
    if abs(best["cos_drift_correctness"]) < 0.5:
        print(" ✅ DRIFT AND CORRECTNESS ARE DISTINCT SIGNALS")
    print("=" * 70)
    print(f" Results: {args.output_dir}/final_results.json")
    print(f" Figures: {args.output_dir}/figures/")
    print("=" * 70)
if __name__ == "__main__":
    main()