#!/usr/bin/env python3
"""
cross_model.py — Cross-Model Drift Analysis
=============================================
Runs AFTER analyze_single.py on 2+ models. Uses probe bundles + caches
to compare drift representations across architectures.

6 Experiments:
  [CM-1] Full-layer CKA matrix (L_A × L_B per pair, not just best layer)
  [CM-2] Drift score correlation (probe A scores vs probe B scores on shared queries)
  [CM-3] Differential facts (queries drifted for A but stable for B)
  [CM-4] Layer correspondence (best layer as % of depth — universal localization?)
  [CM-5] Neuron overlap (same-dim models only: which neuron indices carry drift?)
  [CM-6] Universality score (aggregate metric for paper abstract)

Outputs:
  cross_model_results.json      Complete results
  figures/fig_cm1_cka.png       Layer-wise CKA heatmaps
  figures/fig_cm2_corr.png      Score correlation matrix
  figures/fig_cm3_diff.png      Differential facts scatter
  figures/fig_cm4_layers.png    Layer correspondence bar
  figures/fig_cm5_neurons.png   Neuron overlap (same-dim pairs)
  figures/fig_cm6_summary.png   Universality summary

Usage:
  # Compare two models
  python cross_model.py --models qwen25 llama31
  # All available models
  python cross_model.py --all
  # Quick mode (skip full-layer CKA, just best-layer)
  python cross_model.py --all --quick
"""
import argparse
import json
import logging
import time
import warnings
from datetime import datetime
from pathlib import Path

import numpy as np
import yaml

warnings.filterwarnings("ignore")

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[logging.StreamHandler()])
logger = logging.getLogger(__name__)


# ─────────────────────────────────────────────────────────────────────────────
# CONFIG + DATA LOADING
# ─────────────────────────────────────────────────────────────────────────────
def load_config(path="models.yaml"):
    """Load the experiment YAML config (model registry + defaults)."""
    with open(path) as f:
        return yaml.safe_load(f)


def load_cache(model_dir, model_key):
    """Load the per-model sample cache written by analyze_single.py.

    Returns a list of per-query result dicts, or None if the cache file
    is missing (callers treat None as "model unavailable").
    """
    path = Path(model_dir) / model_key / f"cached_{model_key}.npz"
    if not path.exists():
        logger.error(f"Cache not found: {path}")
        return None
    results = np.load(str(path), allow_pickle=True)["results"].tolist()
    logger.info(f" [{model_key}] Loaded {len(results)} samples")
    return results


def load_probe_bundle(model_dir, model_key):
    """Load the probe bundle npz and coerce known scalar fields.

    npz stores everything as arrays, so known scalar entries are cast
    back to int/float for downstream arithmetic.
    """
    path = Path(model_dir) / model_key / f"probe_bundle_{model_key}.npz"
    if not path.exists():
        logger.warning(f"Probe bundle not found: {path}")
        return None
    d = np.load(str(path), allow_pickle=True)
    bundle = {k: d[k] for k in d.files}
    # Convert scalar items
    for k in ["best_layer", "hidden_dim", "n_samples"]:
        if k in bundle:
            bundle[k] = int(bundle[k])
    for k in ["drift_auroc", "cos_du", "cos_dc"]:
        if k in bundle:
            bundle[k] = float(bundle[k])
    logger.info(f" [{model_key}] Bundle: layer={bundle.get('best_layer')}, "
                f"dim={bundle.get('hidden_dim')}, "
                f"AUROC={bundle.get('drift_auroc', 0):.4f}")
    return bundle


def load_final_results(model_dir, model_key):
    """Load final_results.json for a model, or None if absent."""
    path = Path(model_dir) / model_key / "final_results.json"
    if not path.exists():
        return None
    with open(path) as f:
        return json.load(f)


# ─────────────────────────────────────────────────────────────────────────────
# PROBE FITTING (lightweight — for scoring shared queries)
# ─────────────────────────────────────────────────────────────────────────────
def soft_threshold(w, lam):
    """Elementwise soft-thresholding (proximal operator of the L1 norm)."""
    import torch
    return torch.sign(w) * torch.clamp(torch.abs(w) - lam, min=0.0)


def fit_quick_probe(X_np, y_np, device="cuda:0", lam=1e-3, max_iter=500):
    """Fast L1-regularized logistic probe fit for cross-model scoring.

    Proximal gradient descent with a simple adaptive learning rate:
    halve lr when the candidate step increases the loss, otherwise grow
    it slightly. Features are standardized with train statistics, which
    the returned ``score`` closure re-applies to new inputs.

    Returns:
        (score_fn, weights) where score_fn maps a (n, d) array to
        sigmoid probabilities and weights is the fitted numpy vector.
    """
    import torch
    X = np.nan_to_num(X_np.astype(np.float32), nan=0., posinf=1e4, neginf=-1e4)
    X = np.clip(X, -1e4, 1e4)
    m = X.mean(0, keepdims=True)
    s = X.std(0, keepdims=True) + 1e-8
    Xt = torch.tensor((X - m) / s, dtype=torch.float32, device=device)
    yt = torch.tensor(y_np.astype(np.float32), device=device)
    w = torch.zeros(Xt.shape[1], device=device)
    b = torch.zeros(1, device=device)
    lr = 1.0
    for _ in range(max_iter):
        # Clamp logits so sigmoid/log stay numerically finite.
        z = torch.clamp(Xt @ w + b, -30, 30)
        p = torch.sigmoid(z)
        L = -((yt * torch.log(p + 1e-12))
              + (1 - yt) * torch.log(1 - p + 1e-12)).mean()
        e = p - yt
        gw = (Xt.T @ e) / len(yt)
        # Full-reduce mean reshaped to (1,) to match b; `mean(keepdim=True)`
        # without dim is rejected by some PyTorch versions.
        gb = e.mean().view(1)
        wt = soft_threshold(w - lr * gw, lr * lam)
        bt = b - lr * gb
        zt = torch.clamp(Xt @ wt + bt, -30, 30)
        pt = torch.sigmoid(zt)
        Lt = -((yt * torch.log(pt + 1e-12))
               + (1 - yt) * torch.log(1 - pt + 1e-12)).mean()
        if Lt > L + 1e-4:
            lr *= 0.5
        else:
            lr = min(lr * 1.05, 10.0)
        # Converged: weight update below tolerance.
        if (wt - w).abs().max().item() < 1e-6:
            w, b = wt, bt
            break
        w, b = wt, bt

    def score(X_new):
        """Score new rows with the fitted probe (applies train scaling)."""
        Xn = np.nan_to_num(X_new.astype(np.float32), nan=0.,
                           posinf=1e4, neginf=-1e4)
        Xn = np.clip(Xn, -1e4, 1e4)
        Xn = torch.tensor((Xn - m) / s, dtype=torch.float32, device=device)
        with torch.no_grad():
            return torch.sigmoid(torch.clamp(Xn @ w + b, -30, 30)).cpu().numpy()

    return score, w.cpu().numpy()


# ─────────────────────────────────────────────────────────────────────────────
# [CM-1] CKA ANALYSIS
# ─────────────────────────────────────────────────────────────────────────────
def linear_cka(Xa, Xb):
    """Centered Kernel Alignment between two representation matrices.

    Both inputs are (n, d_*) with matching n; linear-kernel CKA is
    invariant to orthogonal transforms and isotropic scaling.
    """
    def _center(K):
        n = K.shape[0]
        H = np.eye(n) - 1.0 / n
        return H @ K @ H

    Ka = _center(Xa @ Xa.T)
    Kb = _center(Xb @ Xb.T)
    num = np.linalg.norm(Ka.T @ Kb, "fro")
    den = np.linalg.norm(Ka, "fro") * np.linalg.norm(Kb, "fro")
    return float(num / (den + 1e-12))


def cka_analysis(res_a, res_b, key_a, key_b, quick=False):
    """
    [CM-1] CKA between two models.
    If quick=False: full L_A × L_B heatmap.
    If quick=True: just best-layer CKA.
    """
    logger.info(f"[CM-1] CKA: {key_a} vs {key_b}")
    # Build shared query lookup
    qa = {r["query"]: r for r in res_a}
    qb = {r["query"]: r for r in res_b}
    shared = sorted(set(qa) & set(qb))
    logger.info(f" Shared queries: {len(shared)}")
    if len(shared) < 50:
        logger.warning(" Too few shared queries for CKA")
        return None
    # Subsample for speed (CKA is O(n²))
    if len(shared) > 2000:
        np.random.seed(42)
        shared = list(np.random.choice(shared, 2000, replace=False))
    layers_a = sorted(res_a[0]["hidden_states"].keys())
    layers_b = sorted(res_b[0]["hidden_states"].keys())
    if quick:
        # Just best layers
        best_a = layers_a[-5:]  # top 5 layers
        best_b = layers_b[-5:]
    else:
        # Sample layers evenly (max 10 per model for tractability)
        step_a = max(1, len(layers_a) // 10)
        step_b = max(1, len(layers_b) // 10)
        best_a = layers_a[::step_a]
        best_b = layers_b[::step_b]
    cka_mat = np.zeros((len(best_a), len(best_b)))
    for i, la in enumerate(best_a):
        Xa = np.array([qa[q]["hidden_states"][la] for q in shared])
        for j, lb in enumerate(best_b):
            Xb = np.array([qb[q]["hidden_states"][lb] for q in shared])
            cka_mat[i, j] = linear_cka(Xa, Xb)
        if (i + 1) % 3 == 0:
            logger.info(f" CKA row {i+1}/{len(best_a)}")
    best_cka = float(cka_mat.max())
    logger.info(f" Best CKA: {best_cka:.4f}")
    return {
        "layers_a": best_a,
        "layers_b": best_b,
        "cka_matrix": cka_mat.tolist(),
        "best_cka": best_cka,
        "n_shared": len(shared),
    }


# ─────────────────────────────────────────────────────────────────────────────
# [CM-2] SCORE CORRELATION
# ─────────────────────────────────────────────────────────────────────────────
def score_correlation(res_a, res_b, key_a, key_b, bundle_a, bundle_b, device):
    """
    [CM-2] Train probe on each model, score shared queries, correlate.
    """
    from sklearn.metrics import roc_auc_score
    logger.info(f"[CM-2] Score correlation: {key_a} vs {key_b}")
    qa = {r["query"]: r for r in res_a}
    qb = {r["query"]: r for r in res_b}
    shared = sorted(set(qa) & set(qb))
    logger.info(f" Shared: {len(shared)}")
    if len(shared) < 50:
        return None
    bl_a = int(bundle_a["best_layer"])
    bl_b = int(bundle_b["best_layer"])
    # Train probes on full data
    X_a = np.array([r["hidden_states"][bl_a] for r in res_a])
    y_a = np.array([int(r["is_drifted"]) for r in res_a])
    X_b = np.array([r["hidden_states"][bl_b] for r in res_b])
    y_b = np.array([int(r["is_drifted"]) for r in res_b])
    score_a, _ = fit_quick_probe(X_a, y_a, device)
    score_b, _ = fit_quick_probe(X_b, y_b, device)
    # Score shared queries
    Xa_shared = np.array([qa[q]["hidden_states"][bl_a] for q in shared])
    Xb_shared = np.array([qb[q]["hidden_states"][bl_b] for q in shared])
    sa = score_a(Xa_shared)
    sb = score_b(Xb_shared)
    # Labels for shared
    ya_shared = np.array([int(qa[q]["is_drifted"]) for q in shared])
    yb_shared = np.array([int(qb[q]["is_drifted"]) for q in shared])
    corr = float(np.corrcoef(sa, sb)[0, 1])
    try:
        auroc_a = roc_auc_score(ya_shared, sa)
        auroc_b = roc_auc_score(yb_shared, sb)
    except Exception:
        # roc_auc_score raises when a label class is absent; fall back to chance.
        auroc_a = auroc_b = 0.5
    logger.info(f" Score corr: {corr:.4f} "
                f"AUROC_a={auroc_a:.4f} AUROC_b={auroc_b:.4f}")
    return {
        "correlation": corr,
        "auroc_a_on_shared": auroc_a,
        "auroc_b_on_shared": auroc_b,
        "n_shared": len(shared),
        "scores_a": sa.tolist(),
        "scores_b": sb.tolist(),
    }


# ─────────────────────────────────────────────────────────────────────────────
# [CM-3] DIFFERENTIAL FACTS
# ─────────────────────────────────────────────────────────────────────────────
def differential_facts(res_a, res_b, key_a, key_b, bundle_a, bundle_b, device):
    """
    [CM-3] Queries where is_drifted differs between models.
    Each probe should detect its own model's drift correctly.
    """
    from sklearn.metrics import roc_auc_score
    logger.info(f"[CM-3] Differential facts: {key_a} vs {key_b}")
    qa = {r["query"]: r for r in res_a}
    qb = {r["query"]: r for r in res_b}
    shared = sorted(set(qa) & set(qb))
    # Find differential: drifted for A but not B, or vice versa
    diff_queries = [q for q in shared
                    if qa[q]["is_drifted"] != qb[q]["is_drifted"]]
    logger.info(f" Shared={len(shared)}, Differential={len(diff_queries)}")
    if len(diff_queries) < 20:
        logger.warning(" Too few differential facts")
        return None
    bl_a = int(bundle_a["best_layer"])
    bl_b = int(bundle_b["best_layer"])
    # Train probes
    X_a = np.array([r["hidden_states"][bl_a] for r in res_a])
    y_a = np.array([int(r["is_drifted"]) for r in res_a])
    X_b = np.array([r["hidden_states"][bl_b] for r in res_b])
    y_b = np.array([int(r["is_drifted"]) for r in res_b])
    score_a, _ = fit_quick_probe(X_a, y_a, device)
    score_b, _ = fit_quick_probe(X_b, y_b, device)
    # Score differential queries
    Xa_d = np.array([qa[q]["hidden_states"][bl_a] for q in diff_queries])
    Xb_d = np.array([qb[q]["hidden_states"][bl_b] for q in diff_queries])
    sa = score_a(Xa_d)
    sb = score_b(Xb_d)
    la = np.array([int(qa[q]["is_drifted"]) for q in diff_queries])
    lb = np.array([int(qb[q]["is_drifted"]) for q in diff_queries])
    try:
        auroc_a = roc_auc_score(la, sa)
    except Exception:
        auroc_a = 0.5
    try:
        auroc_b = roc_auc_score(lb, sb)
    except Exception:
        auroc_b = 0.5
    # Anti-correlation: when A says drifted and B says stable,
    # score_a should be high and score_b should be low
    score_corr = float(np.corrcoef(sa, sb)[0, 1])
    # Count categories
    a_only = sum(1 for q in diff_queries
                 if qa[q]["is_drifted"] and not qb[q]["is_drifted"])
    b_only = sum(1 for q in diff_queries
                 if not qa[q]["is_drifted"] and qb[q]["is_drifted"])
    logger.info(f" AUROC_a={auroc_a:.4f} AUROC_b={auroc_b:.4f} "
                f"score_corr={score_corr:.4f}")
    logger.info(f" A-only drifted: {a_only} B-only drifted: {b_only}")
    return {
        "n_differential": len(diff_queries),
        "n_shared": len(shared),
        "a_only_drifted": a_only,
        "b_only_drifted": b_only,
        "auroc_a": auroc_a,
        "auroc_b": auroc_b,
        "score_correlation": score_corr,
        "scores_a": sa.tolist(),
        "scores_b": sb.tolist(),
        "labels_a": la.tolist(),
        "labels_b": lb.tolist(),
    }


# ─────────────────────────────────────────────────────────────────────────────
# [CM-4] LAYER CORRESPONDENCE
# ─────────────────────────────────────────────────────────────────────────────
def layer_correspondence(all_bundles, all_final):
    """
    [CM-4] Best drift layer as fraction of total depth.
    If all models peak at ~80%, drift localization is universal.
    """
    logger.info("[CM-4] Layer correspondence")
    data = {}
    for key in all_bundles:
        bl = int(all_bundles[key]["best_layer"])
        # Get total layers from final results
        fr = all_final.get(key, {})
        n_layers = fr.get("best_layer_results", {}).get("layer", bl) + 1
        # Better: look at probe stability layers
        stab = fr.get("probe_stability", {})
        if "layers" in stab and len(stab["layers"]) > 0:
            n_layers = max(stab["layers"]) + 1
        frac = bl / max(n_layers, 1)
        auroc = float(all_bundles[key].get("drift_auroc", 0))
        data[key] = {
            "best_layer": bl,
            "n_layers": n_layers,
            "fraction": frac,
            "auroc": auroc,
        }
        logger.info(f" {key}: L{bl}/{n_layers} = {frac:.1%} "
                    f"AUROC={auroc:.4f}")
    fracs = [v["fraction"] for v in data.values()]
    mean_frac = float(np.mean(fracs))
    std_frac = float(np.std(fracs))
    logger.info(f" Mean fraction: {mean_frac:.1%} +/- {std_frac:.1%}")
    return {
        "per_model": data,
        "mean_fraction": mean_frac,
        "std_fraction": std_frac,
    }


# ─────────────────────────────────────────────────────────────────────────────
# [CM-5] NEURON OVERLAP (same-dim models only)
# ─────────────────────────────────────────────────────────────────────────────
def neuron_overlap(bundle_a, bundle_b, key_a, key_b):
    """
    [CM-5] For same-dimension models: do the same neuron indices carry drift?
    """
    dim_a = int(bundle_a["hidden_dim"])
    dim_b = int(bundle_b["hidden_dim"])
    if dim_a != dim_b:
        logger.info(f"[CM-5] {key_a}({dim_a}) vs {key_b}({dim_b}): "
                    f"dim mismatch, skipping")
        return None
    # Skip (rather than KeyError) when a bundle lacks probe weights.
    if "w_drift" not in bundle_a or "w_drift" not in bundle_b:
        logger.warning(f"[CM-5] {key_a} vs {key_b}: missing w_drift, skipping")
        return None
    logger.info(f"[CM-5] Neuron overlap: {key_a} vs {key_b} (dim={dim_a})")
    w_a = bundle_a["w_drift"]
    w_b = bundle_b["w_drift"]
    active_a = set(np.where(w_a != 0)[0])
    active_b = set(np.where(w_b != 0)[0])
    inter = len(active_a & active_b)
    union = len(active_a | active_b)
    jacc = inter / union if union > 0 else 0.0
    # Cosine of weight vectors (even though from different models)
    cos = float(np.dot(w_a, w_b) /
                (np.linalg.norm(w_a) * np.linalg.norm(w_b) + 1e-12))
    # Top-k overlap
    top100_a = set(np.argsort(np.abs(w_a))[-100:])
    top100_b = set(np.argsort(np.abs(w_b))[-100:])
    top100_overlap = len(top100_a & top100_b) / 100.0
    logger.info(f" Active: A={len(active_a)}, B={len(active_b)}")
    logger.info(f" Jaccard: {jacc:.4f} Cosine: {cos:.4f} "
                f"Top-100 overlap: {top100_overlap:.2%}")
    return {
        "dim": dim_a,
        "n_active_a": len(active_a),
        "n_active_b": len(active_b),
        "intersection": inter,
        "union": union,
        "jaccard": jacc,
        "cosine": cos,
        "top100_overlap": top100_overlap,
    }


# ─────────────────────────────────────────────────────────────────────────────
# [CM-6] UNIVERSALITY SCORE
# ─────────────────────────────────────────────────────────────────────────────
def universality_score(all_cka, all_corr, all_diff, all_layer_corr,
                       n_bootstrap=1000):
    """
    [CM-6] Aggregate metric: geometric mean of CKA, score correlation,
    differential AUROC, and layer consistency.
    """
    logger.info("[CM-6] Universality score")
    components = {}
    # Mean best CKA across pairs
    cka_vals = [v["best_cka"] for v in all_cka.values() if v]
    if cka_vals:
        components["mean_cka"] = float(np.mean(cka_vals))
    # Mean score correlation
    corr_vals = [v["correlation"] for v in all_corr.values() if v]
    if corr_vals:
        components["mean_score_corr"] = float(np.mean(corr_vals))
    # Mean differential AUROC
    diff_aurocs = []
    for v in all_diff.values():
        if v:
            diff_aurocs.extend([v["auroc_a"], v["auroc_b"]])
    if diff_aurocs:
        components["mean_diff_auroc"] = float(np.mean(diff_aurocs))
    # Layer consistency (1 - std of fractions)
    if all_layer_corr:
        components["layer_consistency"] = float(
            1.0 - all_layer_corr.get("std_fraction", 0.5))
    if not components:
        return None
    vals = list(components.values())
    # Geometric mean (values clipped away from 0 so log is finite)
    geo_mean = float(np.exp(np.mean(np.log(np.clip(vals, 1e-6, None)))))
    # Bootstrap CI over the component values
    boot = []
    for _ in range(n_bootstrap):
        idx = np.random.choice(len(vals), len(vals), replace=True)
        boot.append(np.exp(np.mean(np.log(
            np.clip(np.array(vals)[idx], 1e-6, None)))))
    ci_lo = float(np.percentile(boot, 2.5))
    ci_hi = float(np.percentile(boot, 97.5))
    logger.info(f" Components: {components}")
    logger.info(f" Universality: {geo_mean:.4f} [{ci_lo:.4f}, {ci_hi:.4f}]")
    return {
        "components": components,
        "universality_score": geo_mean,
        "ci_95": [ci_lo, ci_hi],
    }


# ─────────────────────────────────────────────────────────────────────────────
# FIGURES
# ─────────────────────────────────────────────────────────────────────────────
def save_cross_figures(out_dir, keys, all_cka, all_corr, all_diff,
                       layer_data, neuron_data, univ_data):
    """Render all cross-model figures (CM-1..CM-6) into out_dir/figures."""
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    fig_dir = Path(out_dir) / "figures"
    fig_dir.mkdir(parents=True, exist_ok=True)
    P = {"drift": "#e74c3c", "unc": "#3498db", "corr": "#2ecc71",
         "null": "#9b59b6", "neu": "#e67e22"}

    # ── CM-1: CKA heatmaps ───────────────────────────────────────────────
    cka_pairs = [(k, v) for k, v in all_cka.items() if v]
    if cka_pairs:
        n_pairs = len(cka_pairs)
        fig, axes = plt.subplots(1, n_pairs, figsize=(8 * n_pairs, 7))
        if n_pairs == 1:
            axes = [axes]
        fig.suptitle("[CM-1] Cross-Model CKA", fontsize=16, fontweight="bold")
        for ax, (pair_key, data) in zip(axes, cka_pairs):
            mat = np.array(data["cka_matrix"])
            im = ax.imshow(mat, cmap="viridis", vmin=0, vmax=1, aspect="auto")
            la = data["layers_a"]
            lb = data["layers_b"]
            step_a = max(1, len(la) // 6)
            step_b = max(1, len(lb) // 6)
            ax.set_xticks(range(0, len(lb), step_b))
            ax.set_yticks(range(0, len(la), step_a))
            ax.set_xticklabels([lb[i] for i in range(0, len(lb), step_b)])
            ax.set_yticklabels([la[i] for i in range(0, len(la), step_a)])
            parts = pair_key.split("_vs_")
            ax.set(xlabel=f"{parts[1]} layer", ylabel=f"{parts[0]} layer",
                   title=f"{pair_key}\nbest={data['best_cka']:.3f}")
            plt.colorbar(im, ax=ax, shrink=0.8)
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm1_cka.png", dpi=300, bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm1 saved")

    # ── CM-2: Score correlation matrix ───────────────────────────────────
    if len(keys) >= 2 and all_corr:
        n = len(keys)
        mat = np.eye(n)
        for pair_key, data in all_corr.items():
            if data is None:
                continue
            parts = pair_key.split("_vs_")
            if len(parts) == 2:
                i = keys.index(parts[0]) if parts[0] in keys else -1
                j = keys.index(parts[1]) if parts[1] in keys else -1
                if i >= 0 and j >= 0:
                    mat[i, j] = mat[j, i] = data["correlation"]
        fig, ax = plt.subplots(figsize=(8, 7))
        im = ax.imshow(mat, cmap="RdBu_r", vmin=-1, vmax=1)
        ax.set_xticks(range(n))
        ax.set_yticks(range(n))
        ax.set_xticklabels(keys, fontsize=12, rotation=20)
        ax.set_yticklabels(keys, fontsize=12)
        for i in range(n):
            for j in range(n):
                c = "white" if abs(mat[i, j]) > 0.5 else "black"
                ax.text(j, i, f"{mat[i,j]:.3f}", ha="center", va="center",
                        fontsize=13, fontweight="bold", color=c)
        ax.set_title("[CM-2] Drift Score Correlation Matrix", fontsize=14)
        plt.colorbar(im, ax=ax, shrink=0.8)
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm2_corr.png", dpi=300,
                    bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm2 saved")

    # ── CM-3: Differential facts ─────────────────────────────────────────
    diff_pairs = [(k, v) for k, v in all_diff.items() if v]
    if diff_pairs:
        n_pairs = min(len(diff_pairs), 4)
        fig, axes = plt.subplots(1, n_pairs, figsize=(7 * n_pairs, 6))
        if n_pairs == 1:
            axes = [axes]
        fig.suptitle("[CM-3] Differential Facts", fontsize=16,
                     fontweight="bold")
        for ax, (pair_key, data) in zip(axes, diff_pairs[:n_pairs]):
            sa = np.array(data["scores_a"])
            sb = np.array(data["scores_b"])
            la = np.array(data["labels_a"])
            lb = np.array(data["labels_b"])
            # Color by which model says drifted
            a_drifted = la.astype(bool) & ~lb.astype(bool)
            b_drifted = ~la.astype(bool) & lb.astype(bool)
            ax.scatter(sa[a_drifted], sb[a_drifted], c=P["drift"],
                       alpha=0.5, s=30, label="A=drifted, B=stable")
            ax.scatter(sa[b_drifted], sb[b_drifted], c=P["unc"],
                       alpha=0.5, s=30, label="A=stable, B=drifted")
            ax.plot([0, 1], [0, 1], "k--", alpha=0.3)
            ax.axhline(0.5, color="gray", ls=":", alpha=0.3)
            ax.axvline(0.5, color="gray", ls=":", alpha=0.3)
            parts = pair_key.split("_vs_")
            ax.set(xlabel=f"{parts[0]} score", ylabel=f"{parts[1]} score",
                   title=f"{pair_key}\nr={data['score_correlation']:.3f}")
            ax.legend(fontsize=8)
            ax.grid(alpha=0.2)
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm3_diff.png", dpi=300,
                    bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm3 saved")

    # ── CM-4: Layer correspondence ───────────────────────────────────────
    if layer_data and "per_model" in layer_data:
        pm = layer_data["per_model"]
        models = sorted(pm.keys())
        fig, axes = plt.subplots(1, 2, figsize=(14, 6))
        fig.suptitle("[CM-4] Layer Correspondence", fontsize=14,
                     fontweight="bold")
        # Absolute layers
        x = np.arange(len(models))
        bls = [pm[m]["best_layer"] for m in models]
        nls = [pm[m]["n_layers"] for m in models]
        ax = axes[0]
        ax.bar(x, bls, color=P["drift"], edgecolor="black", lw=0.5,
               label="Best layer")
        ax.bar(x, [n - b for b, n in zip(bls, nls)], bottom=bls,
               color="#ecf0f1", edgecolor="black", lw=0.5, label="Remaining")
        ax.set_xticks(x)
        ax.set_xticklabels(models, fontsize=11)
        ax.set(ylabel="Layer", title="Best Drift Layer (absolute)")
        ax.legend()
        ax.grid(alpha=0.3, axis="y")
        # Fraction
        ax = axes[1]
        fracs = [pm[m]["fraction"] for m in models]
        bars = ax.bar(x, fracs, color=P["neu"], edgecolor="black", lw=0.5)
        ax.axhline(layer_data["mean_fraction"], color="red", ls="--", lw=2,
                   label=f"Mean: {layer_data['mean_fraction']:.1%}")
        ax.fill_between(
            [-0.5, len(models) - 0.5],
            layer_data["mean_fraction"] - layer_data["std_fraction"],
            layer_data["mean_fraction"] + layer_data["std_fraction"],
            alpha=0.2, color="red")
        ax.set_xticks(x)
        ax.set_xticklabels(models, fontsize=11)
        ax.set(ylabel="Fraction of depth", title="Best Layer as % of Depth",
               ylim=(0, 1))
        ax.legend()
        ax.grid(alpha=0.3, axis="y")
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm4_layers.png", dpi=300,
                    bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm4 saved")

    # ── CM-5: Neuron overlap (same-dim pairs) ────────────────────────────
    # Previously neuron_data was accepted but never plotted, so the
    # fig_cm5_neurons.png promised in the module docstring was missing.
    neuron_pairs = [(k, v) for k, v in (neuron_data or {}).items() if v]
    if neuron_pairs:
        metrics = ["jaccard", "cosine", "top100_overlap"]
        labels = ["Jaccard", "Cosine", "Top-100"]
        colors = [P["drift"], P["unc"], P["neu"]]
        x = np.arange(len(neuron_pairs))
        width = 0.25
        fig, ax = plt.subplots(figsize=(max(8, 3 * len(neuron_pairs)), 6))
        for mi, (metric, lbl, col) in enumerate(zip(metrics, labels, colors)):
            vals = [v[metric] for _, v in neuron_pairs]
            ax.bar(x + (mi - 1) * width, vals, width, color=col,
                   edgecolor="black", lw=0.5, label=lbl)
        ax.set_xticks(x)
        ax.set_xticklabels([k for k, _ in neuron_pairs], fontsize=10,
                           rotation=15)
        ax.set(ylabel="Overlap", title="[CM-5] Neuron Overlap (same-dim pairs)")
        ax.legend(fontsize=10)
        ax.grid(alpha=0.3, axis="y")
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm5_neurons.png", dpi=300,
                    bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm5 saved")

    # ── CM-6: Summary ────────────────────────────────────────────────────
    if univ_data:
        fig, ax = plt.subplots(figsize=(10, 6))
        comp = univ_data["components"]
        names = list(comp.keys())
        vals = list(comp.values())
        x = np.arange(len(names))
        colors = [P["drift"], P["unc"], P["corr"], P["neu"]][:len(names)]
        ax.bar(x, vals, color=colors, edgecolor="black", lw=0.5, alpha=0.8)
        ax.axhline(univ_data["universality_score"], color="red", ls="--",
                   lw=2.5,
                   label=f"Geo mean: {univ_data['universality_score']:.3f} "
                         f"[{univ_data['ci_95'][0]:.3f}, "
                         f"{univ_data['ci_95'][1]:.3f}]")
        ax.set_xticks(x)
        ax.set_xticklabels([n.replace("_", "\n") for n in names], fontsize=10)
        ax.set(ylabel="Score", title="[CM-6] Universality Score Components",
               ylim=(0, 1.1))
        ax.legend(fontsize=11)
        ax.grid(alpha=0.3, axis="y")
        plt.tight_layout()
        plt.savefig(fig_dir / "fig_cm6_summary.png", dpi=300,
                    bbox_inches="tight")
        plt.close()
        logger.info(" fig_cm6 saved")
    logger.info(f"All cross-model figures -> {fig_dir}")


# ─────────────────────────────────────────────────────────────────────────────
# MAIN
# ─────────────────────────────────────────────────────────────────────────────
def main():
    p = argparse.ArgumentParser(
        description="Cross-model drift analysis",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument("--models", nargs="+", default=None,
                   help="Model keys to compare")
    p.add_argument("--all", action="store_true",
                   help="Use all models with available caches")
    p.add_argument("--config", default="models.yaml")
    p.add_argument("--output_dir", default=None)
    p.add_argument("--device", default="cuda:0")
    p.add_argument("--quick", action="store_true",
                   help="Skip full-layer CKA, just best-layer")
    args = p.parse_args()

    cfg = load_config(args.config)
    defaults = cfg.get("defaults", {})
    output_dir = args.output_dir or defaults.get("output_dir",
                                                 "data/experiments/v4")
    # Determine which models to use
    if args.all:
        model_keys = list(cfg["models"].keys())
    elif args.models:
        model_keys = args.models
    else:
        logger.error("Specify --models or --all")
        return

    # Load caches and bundles
    all_results = {}
    all_bundles = {}
    all_final = {}
    for key in model_keys:
        res = load_cache(output_dir, key)
        bundle = load_probe_bundle(output_dir, key)
        final = load_final_results(output_dir, key)
        if res and bundle:
            all_results[key] = res
            all_bundles[key] = bundle
        if final:
            all_final[key] = final
    keys = sorted(all_results.keys())
    logger.info(f"\nModels available: {keys}")
    if len(keys) < 2:
        logger.error("Need at least 2 models with caches + bundles")
        return

    cross_dir = Path(output_dir) / "cross_model"
    cross_dir.mkdir(parents=True, exist_ok=True)

    # Run all 6 experiments (pairwise ones over each unordered pair)
    all_cka = {}
    all_corr = {}
    all_diff = {}
    all_neuron = {}
    for i, ka in enumerate(keys):
        for j, kb in enumerate(keys):
            if i >= j:
                continue
            pair = f"{ka}_vs_{kb}"
            logger.info(f"\n{'─'*50}")
            logger.info(f" {pair}")
            logger.info(f"{'─'*50}")
            # [CM-1] CKA
            all_cka[pair] = cka_analysis(
                all_results[ka], all_results[kb], ka, kb, quick=args.quick)
            # [CM-2] Score correlation
            all_corr[pair] = score_correlation(
                all_results[ka], all_results[kb], ka, kb,
                all_bundles[ka], all_bundles[kb], args.device)
            # [CM-3] Differential facts
            all_diff[pair] = differential_facts(
                all_results[ka], all_results[kb], ka, kb,
                all_bundles[ka], all_bundles[kb], args.device)
            # [CM-5] Neuron overlap
            all_neuron[pair] = neuron_overlap(
                all_bundles[ka], all_bundles[kb], ka, kb)

    # [CM-4] Layer correspondence
    layer_data = layer_correspondence(all_bundles, all_final)
    # [CM-6] Universality score
    univ_data = universality_score(all_cka, all_corr, all_diff, layer_data)

    # Save results.  NOTE: `datetime` is imported at module top — the
    # previous in-function import appeared AFTER this dict was built and
    # raised NameError on `datetime.now()`.
    results = {
        "models": keys,
        "cka": dict(all_cka),
        "score_correlation": dict(all_corr),
        "differential_facts": dict(all_diff),
        "neuron_overlap": {k: v for k, v in all_neuron.items() if v},
        "layer_correspondence": layer_data,
        "universality": univ_data,
        "timestamp": datetime.now().isoformat(),
    }
    out_path = cross_dir / "cross_model_results.json"
    with open(out_path, "w") as f:
        json.dump(results, f, indent=2, default=str)
    logger.info(f"\nResults saved: {out_path}")

    # Figures
    save_cross_figures(str(cross_dir), keys, all_cka, all_corr,
                       all_diff, layer_data, all_neuron, univ_data)

    # Print summary
    print(f"\n{'='*70}")
    print(f" CROSS-MODEL SUMMARY")
    print(f"{'='*70}")
    for pair, data in all_corr.items():
        if data:
            print(f" {pair}: score_corr={data['correlation']:.4f}")
    for pair, data in all_diff.items():
        if data:
            print(f" {pair}: diff_AUROC_a={data['auroc_a']:.4f} "
                  f"diff_AUROC_b={data['auroc_b']:.4f} "
                  f"n_diff={data['n_differential']}")
    if layer_data:
        print(f"\n Layer correspondence: "
              f"{layer_data['mean_fraction']:.1%} +/- "
              f"{layer_data['std_fraction']:.1%}")
    if univ_data:
        print(f"\n UNIVERSALITY SCORE: "
              f"{univ_data['universality_score']:.4f} "
              f"[{univ_data['ci_95'][0]:.4f}, {univ_data['ci_95'][1]:.4f}]")
    print(f"{'='*70}")


if __name__ == "__main__":
    main()