# lfj-code / transfer / code / prompt_selection / visualize_comparison.py
# (uploaded by ethan1115 via huggingface_hub, commit 0161e74 verified)
#!/usr/bin/env python3
"""Visualize cell-eval comparison: Prompt Selection vs Random Baseline.
Modes:
--perturbation X : Single perturbation bar chart
--all : Heatmap across all perturbations
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
# Make the parent package importable when this file is run directly as a
# script (e.g. `python visualize_comparison.py`) rather than via `-m`.
_THIS_DIR = Path(__file__).resolve().parent
if str(_THIS_DIR.parent) not in sys.path:
    sys.path.insert(0, str(_THIS_DIR.parent))
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported so the
# script can render PNGs on headless machines (no display required).
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from prompt_selection import config as cfg
# Metrics to exclude (uninformative: all zero, NaN, or identical)
EXCLUDE = {
    "overlap_at_50", "overlap_at_100", "overlap_at_200",
    "precision_at_50", "precision_at_100", "precision_at_200",
    "de_spearman_sig",
    "discrimination_score_l1", "discrimination_score_l2", "discrimination_score_cosine",
    "de_nsig_counts_real", "de_nsig_counts_pred",
}
# Pretty axis labels keyed by the raw metric name from the CSV; embedded
# "\n" produces two-line tick labels. Metrics absent here fall back to
# their raw name (see the .fillna(df["metric"]) in both visualizers).
DISPLAY_NAMES = {
    "overlap_at_N": "Overlap@N",
    "overlap_at_500": "Overlap@500",
    "precision_at_N": "Precision@N",
    "precision_at_500": "Precision@500",
    "de_direction_match": "DE Direction\nMatch",
    "de_spearman_lfc_sig": "DE Spearman\nLFC",
    "de_sig_genes_recall": "DE Sig Genes\nRecall",
    "pr_auc": "PR AUC",
    "roc_auc": "ROC AUC",
    "pearson_delta": "Pearson\nDelta",
    "mse": "MSE",
    "mae": "MAE",
    "mse_delta": "MSE Delta",
    "mae_delta": "MAE Delta",
}
# Error-type metrics where a smaller value is the better result; used to
# split the bar chart into panels and to flip signs in the heatmap.
LOWER_IS_BETTER = {"mse", "mae", "mse_delta", "mae_delta"}
def visualize_single(pert_name: str):
    """Generate a two-panel horizontal bar chart for a single perturbation.

    Reads ``comparison_mean.csv`` from the perturbation's eval directory and
    writes ``comparison_chart.png`` next to it. Quality metrics (higher is
    better) and error metrics (lower is better) are drawn in separate
    panels; each bar pair is annotated with the percentage difference of
    prompt selection vs. the random baseline.

    Args:
        pert_name: Perturbation name accepted by ``cfg.get_pert_config``.
    """
    pcfg = cfg.get_pert_config(pert_name)
    csv_path = pcfg.eval_dir / "comparison_mean.csv"
    output_path = pcfg.eval_dir / "comparison_chart.png"
    if not csv_path.exists():
        print(f"No comparison_mean.csv found for {pert_name}: {csv_path}")
        return
    df = pd.read_csv(csv_path)
    df = df[~df["metric"].isin(EXCLUDE)].copy()
    df = df.dropna(subset=["prompt_selection", "random_baseline"]).reset_index(drop=True)
    df["display"] = df["metric"].map(DISPLAY_NAMES).fillna(df["metric"])
    # % difference relative to |baseline|; guard against division by ~0.
    df["pct_diff"] = np.where(
        df["random_baseline"].abs() > 1e-12,
        (df["prompt_selection"] - df["random_baseline"]) / df["random_baseline"].abs() * 100,
        0.0,
    )
    error_metrics = df[df["metric"].isin(LOWER_IS_BETTER)].reset_index(drop=True)
    quality_metrics = df[~df["metric"].isin(LOWER_IS_BETTER)].reset_index(drop=True)
    # Panel heights proportional to the number of bars in each panel.
    fig, axes = plt.subplots(
        2, 1, figsize=(14, 10),
        gridspec_kw={"height_ratios": [max(len(quality_metrics), 1), max(len(error_metrics), 1)]},
    )
    fig.suptitle(
        f"Cell-Eval: Prompt Selection vs Random Baseline ({pert_name} B-cells)",
        fontsize=15, fontweight="bold", y=0.98,
    )
    colors_ps = "#4C72B0"
    colors_bl = "#DD8452"
    for ax, subset, title, lower_better in [
        (axes[0], quality_metrics, "Quality Metrics (higher is better)", False),
        (axes[1], error_metrics, "Error Metrics (lower is better)", True),
    ]:
        n = len(subset)
        if n == 0:
            # Nothing to draw in this panel; hide it entirely.
            ax.set_visible(False)
            continue
        y = np.arange(n)
        bar_h = 0.35
        ax.barh(y - bar_h / 2, subset["prompt_selection"], bar_h,
                label="Prompt Selection", color=colors_ps, edgecolor="white", linewidth=0.5)
        ax.barh(y + bar_h / 2, subset["random_baseline"], bar_h,
                label="Random Baseline", color=colors_bl, edgecolor="white", linewidth=0.5)
        ax.set_yticks(y)
        ax.set_yticklabels(subset["display"], fontsize=11)
        ax.invert_yaxis()
        ax.set_title(title, fontsize=12, fontweight="bold", pad=10)
        ax.legend(loc="lower right", fontsize=10)
        ax.grid(axis="x", alpha=0.3, linestyle="--")
        ax.set_axisbelow(True)
        # Annotate each bar pair with the % difference, colored green when
        # prompt selection is better and red when it is worse.
        # After reset_index(drop=True) the index IS the row position, so the
        # original subset.index.get_loc(i) round-trip is unnecessary.
        for idx, row in subset.iterrows():
            pct = row["pct_diff"]
            max_val = max(row["prompt_selection"], row["random_baseline"])
            if abs(pct) < 0.01:
                label = "0%"
                color = "gray"
            else:
                sign = "+" if pct > 0 else ""
                label = f"{sign}{pct:.1f}%"
                if lower_better:
                    # For error metrics, a positive diff means PS got worse.
                    color = "#D32F2F" if pct > 0 else "#388E3C"
                else:
                    color = "#388E3C" if pct > 0 else "#D32F2F"
            ax.text(max_val * 1.02, idx, label,
                    va="center", ha="left", fontsize=10, fontweight="bold", color=color)
        x_max = subset[["prompt_selection", "random_baseline"]].max().max()
        # Pad the right edge for the % labels, but only when it is a valid
        # positive limit (all-nonpositive values would corrupt the axis).
        if x_max > 0:
            ax.set_xlim(right=x_max * 1.18)
    plt.tight_layout(rect=[0, 0, 1, 0.96])
    fig.savefig(output_path, dpi=150, bbox_inches="tight", facecolor="white")
    # Release the figure: under Agg, unclosed figures accumulate in memory
    # when this function is called for many perturbations in one process.
    plt.close(fig)
    print(f"Saved: {output_path}")
def visualize_all():
    """Generate a perturbation x metric heatmap of % improvement.

    Reads ``all_comparison.csv`` from ``cfg.EVAL_DIR`` (produced by
    aggregate_results.py) and writes ``all_comparison_heatmap.png``.
    Positive (green) cells mean prompt selection beat the random baseline;
    error metrics are sign-flipped so that convention holds everywhere.
    """
    csv_path = cfg.EVAL_DIR / "all_comparison.csv"
    output_path = cfg.EVAL_DIR / "all_comparison_heatmap.png"
    if not csv_path.exists():
        print(f"No all_comparison.csv found: {csv_path}")
        print("Run aggregate_results.py first.")
        return
    df = pd.read_csv(csv_path)
    df = df[~df["metric"].isin(EXCLUDE)].copy()
    df = df.dropna(subset=["prompt_selection", "random_baseline"])
    # Compute percentage difference vs |baseline|, guarding division by ~0.
    df["pct_diff"] = np.where(
        df["random_baseline"].abs() > 1e-12,
        (df["prompt_selection"] - df["random_baseline"]) / df["random_baseline"].abs() * 100,
        0.0,
    )
    # For lower-is-better metrics, negate so positive = PS better
    for m in LOWER_IS_BETTER:
        mask = df["metric"] == m
        df.loc[mask, "pct_diff"] = -df.loc[mask, "pct_diff"]
    df["display"] = df["metric"].map(DISPLAY_NAMES).fillna(df["metric"])
    # Pivot: perturbation x metric (first value wins on duplicates)
    pivot = df.pivot_table(index="perturbation", columns="display", values="pct_diff", aggfunc="first")
    pivot = pivot.reindex(sorted(pivot.index))
    fig, ax = plt.subplots(figsize=(16, max(8, len(pivot) * 0.6)))
    # Symmetric color scale around 0, floored at +/-10 so tiny differences
    # don't saturate the colormap. The original took .min()/.max() of the
    # non-NaN values directly, which raises ValueError when the pivot is
    # empty or entirely NaN; guard that case explicitly.
    finite_vals = pivot.values[~np.isnan(pivot.values)]
    vmax = max(float(np.abs(finite_vals).max()), 10) if finite_vals.size else 10
    im = ax.imshow(pivot.values, cmap="RdYlGn", aspect="auto", vmin=-vmax, vmax=vmax)
    ax.set_xticks(range(len(pivot.columns)))
    ax.set_xticklabels(pivot.columns, rotation=45, ha="right", fontsize=10)
    ax.set_yticks(range(len(pivot.index)))
    ax.set_yticklabels(pivot.index, fontsize=11)
    # Annotate cells; switch to white text where the background is saturated.
    for i in range(len(pivot.index)):
        for j in range(len(pivot.columns)):
            val = pivot.values[i, j]
            if not np.isnan(val):
                color = "white" if abs(val) > vmax * 0.6 else "black"
                ax.text(j, i, f"{val:+.1f}%", ha="center", va="center",
                        fontsize=8, color=color, fontweight="bold")
    cbar = plt.colorbar(im, ax=ax, shrink=0.8, pad=0.02)
    cbar.set_label("% Difference (positive = PS better)", fontsize=11)
    ax.set_title(
        "Prompt Selection vs Random Baseline: % Improvement per Perturbation\n"
        "(green = PS better, red = Baseline better; error metrics negated)",
        fontsize=13, fontweight="bold", pad=15,
    )
    plt.tight_layout()
    fig.savefig(output_path, dpi=150, bbox_inches="tight", facecolor="white")
    # Release the figure to avoid accumulating Agg canvases in memory.
    plt.close(fig)
    print(f"Saved: {output_path}")
def main():
    """CLI entry point: dispatch to single-chart or heatmap mode.

    Exactly one of ``--perturbation`` / ``--all`` must be supplied
    (enforced by argparse's mutually exclusive group).
    """
    parser = argparse.ArgumentParser(description="Visualize cell-eval comparison results")
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument("--perturbation", type=str, help="Single perturbation name")
    mode.add_argument("--all", action="store_true", help="All perturbations heatmap")
    opts = parser.parse_args()
    if opts.all:
        visualize_all()
        return
    visualize_single(opts.perturbation)


if __name__ == "__main__":
    main()