""" Distributional evaluation metrics for perturbation prediction. Complements cell-eval (which focuses on conditional mean accuracy) with metrics that measure distributional fidelity: 1. Per-perturbation MMD (Maximum Mean Discrepancy) 2. Per-perturbation Energy Distance 3. Variance Ratio (per-gene variance match) 4. Gene-Gene Correlation Preservation 5. Classifier Two-Sample Test (C2ST) 6. k-NN Precision / Recall """ import numpy as np import anndata as ad import pandas as pd from scipy.spatial.distance import cdist from scipy.stats import pearsonr from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.preprocessing import StandardScaler # ── Helpers ────────────────────────────────────────────────────────── def _get_pert_cells(adata, pert): mask = adata.obs["perturbation"] == pert X = adata[mask].X return np.asarray(X) if not isinstance(X, np.ndarray) else X def _rbf_kernel(X, Y, sigma): D2 = cdist(X, Y, metric="sqeuclidean") return np.exp(-D2 / (2 * sigma ** 2)) # ── 1. MMD (multi-sigma RBF) ──────────────────────────────────────── def mmd_multi_sigma(X, Y, scales=(0.5, 1.0, 2.0, 4.0)): D2_all = cdist(X, X, "sqeuclidean") median_sq = np.median(D2_all[np.triu_indices(len(X), k=1)]) median_sq = max(median_sq, 1e-12) vals = [] for s in scales: sigma = np.sqrt(s * median_sq) Kxx = _rbf_kernel(X, X, sigma) Kyy = _rbf_kernel(Y, Y, sigma) Kxy = _rbf_kernel(X, Y, sigma) m, n = len(X), len(Y) t_xx = (Kxx.sum() - np.trace(Kxx)) / (m * (m - 1)) t_yy = (Kyy.sum() - np.trace(Kyy)) / (n * (n - 1)) t_xy = Kxy.mean() vals.append(t_xx + t_yy - 2 * t_xy) return float(np.mean(vals)) # ── 2. Energy Distance ────────────────────────────────────────────── def energy_distance(X, Y): Dxy = cdist(X, Y, "euclidean").mean() Dxx = cdist(X, X, "euclidean").mean() Dyy = cdist(Y, Y, "euclidean").mean() return float(2 * Dxy - Dxx - Dyy) # ── 3. 
# ── 3. Variance Ratio ───────────────────────────────────────────────
def variance_ratio(pred_cells, real_cells, var_threshold=0.001):
    """Per-gene variance ratio: pred_var / real_var.  Perfect = 1.0.

    Only genes with real variance above ``var_threshold`` are considered
    (skips near-constant / sparse-zero genes).  Returns NaN summaries when
    fewer than 10 genes pass the threshold, since quantiles over a handful
    of ratios are not meaningful.
    """
    pv = pred_cells.var(axis=0)
    rv = real_cells.var(axis=0)
    mask = rv > var_threshold
    if mask.sum() < 10:
        return {"var_ratio_median": np.nan, "var_ratio_mean": np.nan,
                "var_ratio_q25": np.nan, "var_ratio_q75": np.nan,
                "n_active_genes": int(mask.sum())}
    ratio = pv[mask] / rv[mask]
    return {
        "var_ratio_median": float(np.median(ratio)),
        "var_ratio_mean": float(np.mean(ratio)),
        "var_ratio_q25": float(np.percentile(ratio, 25)),
        "var_ratio_q75": float(np.percentile(ratio, 75)),
        "n_active_genes": int(mask.sum()),
    }


# ── 4. Gene-Gene Correlation Preservation ───────────────────────────
def gene_corr_preservation(pred_cells, real_cells, top_k=200):
    """Pearson correlation between flattened gene-gene correlation matrices.

    Uses the ``top_k`` highest-variance genes (ranked by real-data variance)
    for tractability.  ``top_k`` is clamped to the number of available genes,
    so callers no longer need to pre-clamp it themselves.
    """
    # FIX: a top_k larger than the gene count previously made triu_indices
    # index past the (n_genes x n_genes) correlation matrices and crash.
    top_k = min(top_k, real_cells.shape[1])
    gene_var = real_cells.var(axis=0)
    top_idx = np.argsort(gene_var)[-top_k:]
    pred_sub = pred_cells[:, top_idx]
    real_sub = real_cells[:, top_idx]
    pred_corr = np.corrcoef(pred_sub.T)
    real_corr = np.corrcoef(real_sub.T)
    idx = np.triu_indices(top_k, k=1)  # off-diagonal upper triangle only
    r, _ = pearsonr(pred_corr[idx], real_corr[idx])
    return float(r)


# ── 5. C2ST (Classifier Two-Sample Test) ────────────────────────────
def c2st(pred_cells, real_cells, max_samples=500):
    """Logistic regression C2ST. Returns accuracy (0.5 = indistinguishable).

    Subsamples each side to at most ``max_samples`` cells, standardizes, and
    cross-validates a logistic regression that tries to separate pred (label
    0) from real (label 1).  Uses the global NumPy RNG for subsampling
    (seeded in __main__).  Returns NaN when either side has < 2 cells, since
    cross-validation is impossible.
    """
    n_pred = min(len(pred_cells), max_samples)
    n_real = min(len(real_cells), max_samples)
    idx_p = np.random.choice(len(pred_cells), n_pred, replace=False)
    idx_r = np.random.choice(len(real_cells), n_real, replace=False)
    X = np.vstack([pred_cells[idx_p], real_cells[idx_r]])
    y = np.concatenate([np.zeros(n_pred), np.ones(n_real)])
    scaler = StandardScaler()
    X_s = scaler.fit_transform(X)
    clf = LogisticRegression(max_iter=500, C=1.0, solver="lbfgs")
    n_cv = min(5, min(n_pred, n_real))
    if n_cv < 2:
        return float("nan")
    scores = cross_val_score(clf, X_s, y, cv=n_cv, scoring="accuracy")
    return float(scores.mean())


# ── 6. k-NN Precision / Recall ──────────────────────────────────────
def knn_precision_recall(pred_cells, real_cells, k=10):
    """
    Precision: fraction of pred cells whose k-NN ball overlaps real data.
    Recall: fraction of real cells whose k-NN ball overlaps pred data.

    ``k`` is clamped to the available sample sizes so the column index below
    cannot go out of range; behavior for in-range ``k`` is unchanged.
    """
    # FIX: k larger than either sample size previously index-errored on the
    # [:, k - 1] column selection.
    k = max(1, min(k, len(pred_cells), len(real_cells)))
    D_rr = cdist(real_cells, real_cells, "euclidean")
    np.fill_diagonal(D_rr, np.inf)  # exclude self from neighbor search
    real_knn_dist = np.sort(D_rr, axis=1)[:, k - 1]
    D_pp = cdist(pred_cells, pred_cells, "euclidean")
    np.fill_diagonal(D_pp, np.inf)
    pred_knn_dist = np.sort(D_pp, axis=1)[:, k - 1]
    D_pr = cdist(pred_cells, real_cells, "euclidean")
    precision = float((D_pr <= real_knn_dist[None, :]).any(axis=1).mean())
    D_rp = D_pr.T
    recall = float((D_rp <= pred_knn_dist[None, :]).any(axis=1).mean())
    return precision, recall


# ── Main ─────────────────────────────────────────────────────────────
def evaluate_model(pred_path, real_path, name):
    """Compute every distributional metric, per perturbation.

    Parameters
    ----------
    pred_path, real_path : str
        Paths to .h5ad files whose ``obs["perturbation"]`` labels match.
    name : str
        Model label; kept for interface compatibility (not used internally).

    Returns a DataFrame with one row per non-control perturbation that has
    at least 5 cells on both sides.
    """
    pred = ad.read_h5ad(pred_path)
    real = ad.read_h5ad(real_path)
    perts = [p for p in pred.obs["perturbation"].unique() if p != "control"]
    rows = []
    for pert in perts:
        pc = _get_pert_cells(pred, pert)
        rc = _get_pert_cells(real, pert)
        if len(pc) < 5 or len(rc) < 5:
            continue  # too few cells for stable distance / quantile estimates
        mmd = mmd_multi_sigma(pc, rc)
        edist = energy_distance(pc, rc)
        vr = variance_ratio(pc, rc)
        gc = gene_corr_preservation(pc, rc, top_k=min(200, pc.shape[1]))
        c2st_acc = c2st(pc, rc)
        prec, rec = knn_precision_recall(pc, rc, k=min(5, len(rc) - 1))
        rows.append({
            "perturbation": pert,
            "mmd": mmd,
            "energy_distance": edist,
            **vr,
            "gene_corr_preservation": gc,
            "c2st_accuracy": c2st_acc,
            "knn_precision": prec,
            "knn_recall": rec,
        })
    df = pd.DataFrame(rows)
    return df


def print_comparison(results: dict[str, pd.DataFrame]):
    """Print a per-metric comparison table across models.

    For each metric, averages the per-perturbation values of each model and
    marks the best one.  "Best" means: closest to the ideal target for
    metrics with a known target (variance ratio -> 1.0, C2ST -> 0.5), lowest
    for divergence-style metrics, highest otherwise.
    """
    metrics = [
        "mmd", "energy_distance",
        "var_ratio_median", "var_ratio_mean",
        "gene_corr_preservation", "c2st_accuracy",
        "knn_precision", "knn_recall",
    ]
    lower_better = {"mmd", "energy_distance"}
    target_metrics = {
        "var_ratio_median": 1.0,
        "var_ratio_mean": 1.0,
        "c2st_accuracy": 0.5,
    }
    names = list(results.keys())
    short = {n: n[:12] for n in names}
    header = f"{'Metric':<28}" + "".join(f"{short[n]:>14}" for n in names) + " Best"
    print(header)
    print("-" * len(header))
    for m in metrics:
        vals = []
        for n in names:
            vals.append(float(results[n][m].mean()))
        if m in target_metrics:
            # Best = closest to the ideal value (1.0 or 0.5).
            target = target_metrics[m]
            dists = [abs(v - target) for v in vals]
            best_idx = dists.index(min(dists))
        elif m in lower_better:
            best_idx = vals.index(min(vals))
        else:
            best_idx = vals.index(max(vals))
        short_names = [short[n] for n in names]
        line = f"{m:<28}" + "".join(f"{v:>14.4f}" for v in vals)
        line += f" <- {short_names[best_idx]}"
        print(line)
    print("\n=== Variance Ratio (median [Q25, Q75], 1.0 = perfect) ===")
    for n in names:
        df = results[n]
        med = df["var_ratio_median"].mean()
        q25 = df["var_ratio_q25"].mean()
        q75 = df["var_ratio_q75"].mean()
        print(f" {short[n]:<14} {med:.3f} [{q25:.3f}, {q75:.3f}]")


if __name__ == "__main__":
    np.random.seed(42)  # makes the C2ST subsampling reproducible
    BASE = "/home/hp250092/ku50001222/qian/aivc/lfj"
    models = {
        "A6_SDE50": (
            f"{BASE}/GRN/result/SB/A6_dsm_aniso/iteration_195000/pred.h5ad",
            f"{BASE}/GRN/result/SB/A6_dsm_aniso/iteration_195000/real.h5ad",
        ),
        "A6_ODE_RK4": (
            f"{BASE}/GRN/result/SB/A6_dsm_aniso/eval_only/pred.h5ad",
            f"{BASE}/GRN/result/SB/A6_dsm_aniso/eval_only/real.h5ad",
        ),
        "A1_Euler": (
            f"{BASE}/GRN/result/SB/A1_baseline/iteration_195000/pred.h5ad",
            f"{BASE}/GRN/result/SB/A1_baseline/iteration_195000/real.h5ad",
        ),
        "A1_RK4": (
            f"{BASE}/GRN/result/SB/A1_baseline/eval_only/pred.h5ad",
            f"{BASE}/GRN/result/SB/A1_baseline/eval_only/real.h5ad",
        ),
        "scDFM": (
            f"{BASE}/GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/pred.h5ad",
            f"{BASE}/GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/real.h5ad",
        ),
    }
    results = {}
    for name, (pred_path, real_path) in models.items():
        print(f"\n>>> Evaluating {name} ...")
        df = evaluate_model(pred_path, real_path, name)
        results[name] = df
        out_dir = str(pred_path).rsplit("/", 1)[0]
        df.to_csv(f"{out_dir}/distributional_results.csv", index=False)
        print(f" saved to {out_dir}/distributional_results.csv")

    # FIX: the banner previously hard-coded "39 perturbations"; the real
    # count depends on the data, so derive it from the evaluated results.
    pert_counts = sorted({len(df) for df in results.values()})
    pert_label = (str(pert_counts[0]) if len(pert_counts) == 1
                  else "/".join(str(c) for c in pert_counts))
    print("\n" + "=" * 100)
    print(f"DISTRIBUTIONAL METRICS COMPARISON (mean over {pert_label} perturbations)")
    print("=" * 100 + "\n")
    print_comparison(results)