| """ |
| accuracy_chart.py - Generate accuracy charts for swap_analysis from saved JSON data. |
| |
| Reads per-scale pred_stats_{scale}.json and category_validity_{scale}.json |
| from results/{model}/json/ and saves plots to results/{model}/plots/accuracy/. |
| |
| Output files per model: |
| accuracy_chart.png - combined summary (all panels) |
| accuracy_group_bars.png - per-group (orig/swap/both) bar chart across scales |
| accuracy_trajectory.png - both-correct trajectory line plot across scales |
| accuracy_category.png - per-category accuracy (orig vs swap) across scales |
| |
| Processes all models and all available scales by default. |
| |
| Usage: |
| python accuracy_chart.py |
| """ |
|
|
| import os |
| import json |
| import re |
| import numpy as np |
| import matplotlib |
| matplotlib.use('Agg') |
| import matplotlib.pyplot as plt |
|
|
# Root of the results tree, resolved relative to this script's location.
RESULTS_DIR = os.path.join(os.path.dirname(__file__), 'results')


# Canonical ordering of scales used for every x-axis, legend, and sort key.
SCALE_ORDER = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
# Relation groups; each gets its own panel in the group bar charts.
GROUP_ORDER = ['horizontal', 'vertical', 'distance']
# Individual relation categories plotted in the per-category charts.
CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close']


# Fixed color per scale so a given scale looks the same in every plot.
SCALE_COLORS = {
    'vanilla': '#1f77b4',
    '80k': '#ff7f0e',
    '400k': '#2ca02c',
    '800k': '#d62728',
    '2m': '#9467bd',
    'roborefer':'#8c564b',
}
GROUP_COLORS = {
    'horizontal': '#2ca02c',
    'vertical': '#ff7f0e',
    'distance': '#9467bd',
}

# Category colors come in dark/light pairs; each pair's dark shade matches
# a GROUP_COLORS hue (left=horizontal, above=vertical, far=distance).
CAT_COLORS = {
    'left': '#2ca02c', 'right': '#98df8a',
    'above': '#ff7f0e', 'under': '#ffbb78',
    'far': '#9467bd', 'close': '#c5b0d5',
}
|
|
|
|
| |
|
|
def load_pred_stats(json_dir):
    """Collect every pred_stats_{scale}.json under *json_dir*.

    Each parsed dict is tagged with a 'scale' key taken from the filename.
    Returns a list of dicts in directory-listing order.
    """
    pattern = re.compile(r'pred_stats_(.+)\.json$')
    stats = []
    for entry in os.listdir(json_dir):
        match = pattern.match(entry)
        if match is None:
            continue
        with open(os.path.join(json_dir, entry)) as fh:
            record = json.load(fh)
        record['scale'] = match.group(1)
        stats.append(record)
    return stats
|
|
|
|
def load_category_validity(json_dir):
    """Collect every category_validity_{scale}.json under *json_dir*.

    Returns a mapping of scale name -> parsed JSON dict.
    """
    pattern = re.compile(r'category_validity_(.+)\.json$')
    by_scale = {}
    for entry in os.listdir(json_dir):
        match = pattern.match(entry)
        if match is None:
            continue
        with open(os.path.join(json_dir, entry)) as fh:
            by_scale[match.group(1)] = json.load(fh)
    return by_scale
|
|
|
|
| |
|
|
def plot_group_bars(pred_stats, model_type, ax_list):
    """
    Render grouped orig/swap/both accuracy bars across scales, one panel
    per relation group.

    ax_list: list of 3 Axes, one per entry in GROUP_ORDER.
    """
    scales = [s for s in SCALE_ORDER if any(rec['scale'] == s for rec in pred_stats)]
    positions = np.arange(3)
    bar_w = 0.8 / max(len(scales), 1)

    for ax, group in zip(ax_list, GROUP_ORDER):
        for rank, scale in enumerate(scales):
            rec = next((r for r in pred_stats if r['scale'] == scale), None)
            if rec is None:
                continue
            heights = [rec.get(f'{group}_acc_{kind}', 0)
                       for kind in ('orig', 'swap', 'both')]
            # Center the cluster of per-scale bars on each x position.
            shift = (rank - len(scales) / 2 + 0.5) * bar_w
            ax.bar(positions + shift, heights, bar_w,
                   label=scale, color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)
        ax.set_xticks(positions)
        ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10)
        ax.set_ylabel('Accuracy', fontsize=9)
        ax.set_title(f'{group.capitalize()}', fontweight='bold', fontsize=11,
                     color=GROUP_COLORS.get(group, 'black'))
        ax.legend(fontsize=7, ncol=2)
        ax.set_ylim(0, 1.15)
        # Dashed 0.5 line marks binary chance level.
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
        ax.grid(True, alpha=0.3, axis='y')
|
|
|
|
def plot_both_trajectory(pred_stats, model_type, ax):
    """Line plot of acc_both per group (plus overall) against scale."""
    # First record wins per scale, mirroring a first-match lookup.
    by_scale = {}
    for rec in pred_stats:
        by_scale.setdefault(rec['scale'], rec)
    scales = [s for s in SCALE_ORDER if s in by_scale]
    xs = range(len(scales))

    for group in GROUP_ORDER:
        ys = [by_scale[s].get(f'{group}_acc_both', 0) for s in scales]
        ax.plot(xs, ys, '-o',
                color=GROUP_COLORS.get(group, 'gray'),
                label=group, linewidth=2.5, markersize=7)

    # Aggregate across groups, drawn as a dashed black reference line.
    overall = [by_scale[s].get('overall_acc_both', 0) for s in scales]
    ax.plot(xs, overall, '--s',
            color='black', label='overall', linewidth=2, markersize=6, alpha=0.7)

    ax.set_xticks(list(xs))
    ax.set_xticklabels(scales, fontsize=9)
    ax.set_xlabel('Scale', fontsize=9)
    ax.set_ylabel('Accuracy (both correct)', fontsize=9)
    ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11)
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
    ax.grid(True, alpha=0.3)
|
|
|
|
def plot_overall_trajectory(pred_stats, model_type, ax):
    """Line plot of overall acc_orig / acc_swap / acc_both against scale."""
    # First record wins per scale, mirroring a first-match lookup.
    by_scale = {}
    for rec in pred_stats:
        by_scale.setdefault(rec['scale'], rec)
    scales = [s for s in SCALE_ORDER if s in by_scale]
    xs = range(len(scales))

    series = [
        ('overall_acc_orig', 'orig', '-o'),
        ('overall_acc_swap', 'swap', '-s'),
        ('overall_acc_both', 'both', '-^'),
    ]
    for key, label, style in series:
        ys = [by_scale[s].get(key, 0) for s in scales]
        ax.plot(xs, ys, style, label=label, linewidth=2.2, markersize=6)

    ax.set_xticks(list(xs))
    ax.set_xticklabels(scales, fontsize=9)
    ax.set_xlabel('Scale', fontsize=9)
    ax.set_ylabel('Overall Accuracy', fontsize=9)
    ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11)
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
    ax.grid(True, alpha=0.3)
|
|
|
|
def plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None):
    """
    Heatmap-style grouped bars: per-category + overall acc_orig and acc_swap across scales.

    ax_orig: Axes for acc_orig, ax_swap: Axes for acc_swap.
    pred_stats: optional list of per-scale stat dicts; supplies the
    'overall' bar values (0 is plotted when absent).
    """
    available = [s for s in SCALE_ORDER if s in cat_validity]
    cats_with_overall = CATEGORY_ORDER + ['overall']
    x = np.arange(len(cats_with_overall))
    width = 0.8 / max(len(available), 1)

    # Maps each per-category metric to the pred_stats key holding its
    # overall (cross-category) value.
    overall_metric = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}

    for ax, metric, title in [
        (ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'),
        (ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'),
    ]:
        for i, scale in enumerate(available):
            cv = cat_validity[scale]
            vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
            # Final bar is the overall accuracy; 0 when pred_stats missing.
            if pred_stats is not None:
                entry = next((d for d in pred_stats if d['scale'] == scale), None)
                vals.append(entry.get(overall_metric[metric], 0) if entry else 0)
            else:
                vals.append(0)
            # Center the per-scale bar cluster on each category tick.
            offset = (i - len(available) / 2 + 0.5) * width
            ax.bar(x + offset, vals, width,
                   label=scale, color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)

        # Faint background tint per category column (category color).
        for j, cat in enumerate(CATEGORY_ORDER):
            c = CAT_COLORS.get(cat, 'gray')
            ax.axvspan(j - 0.45, j + 0.45, color=c, alpha=0.06, linewidth=0)

        # Dotted separator between category bars and the 'overall' bar.
        sep = len(CATEGORY_ORDER) - 0.5
        ax.axvline(x=sep, color='black', linewidth=1.2, linestyle=':', alpha=0.6)

        ax.set_xticks(x)
        ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15)
        ax.set_ylabel('Accuracy', fontsize=9)
        ax.set_title(title, fontweight='bold', fontsize=11)
        ax.legend(fontsize=7, ncol=2)
        ax.set_ylim(0, 1.15)
        # Dashed 0.5 line marks chance level.
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
        ax.grid(True, alpha=0.3, axis='y')

        # Red beta marker flags categories whose 'reliable' flag is False —
        # NOTE: reliability is read from the LAST available scale only.
        if available:
            last_scale = available[-1]
            cv = cat_validity[last_scale]
            for j, cat in enumerate(CATEGORY_ORDER):
                reliable = cv.get(cat, {}).get('reliable', True)
                if not reliable:
                    ax.text(j, 1.08, 'β', ha='center', va='center',
                            fontsize=9, color='red', fontweight='bold')
|
|
|
|
| |
|
|
def plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None):
    """
    One figure per scale: side-by-side acc_orig and acc_swap per category + overall.

    Saves category_accuracy_{scale}.png into *save_dir*.
    pred_stats: optional list of per-scale stat dicts; supplies the
    'overall' bar value (0 is plotted when absent).
    """
    overall_metric = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}
    cats_with_overall = CATEGORY_ORDER + ['overall']

    # Iterate scales in canonical order; unknown scales sort last.
    for scale in sorted(cat_validity.keys(), key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99):
        cv = cat_validity[scale]
        ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None

        fig, axes = plt.subplots(1, 2, figsize=(16, 5))
        x = np.arange(len(cats_with_overall))
        width = 0.55

        for ax, metric, title in [
            (axes[0], 'acc_orig', f'acc_orig ({scale})'),
            (axes[1], 'acc_swap', f'acc_swap ({scale})'),
        ]:
            vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
            overall_val = ps_entry.get(overall_metric[metric], 0) if ps_entry else 0
            vals.append(overall_val)
            colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333']
            bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white')

            # Dotted separator between category bars and the 'overall' bar.
            ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2,
                       linestyle=':', alpha=0.6)

            ax.set_xticks(x)
            ax.set_xticklabels(cats_with_overall, fontsize=10)
            ax.set_ylabel('Accuracy', fontsize=10)
            ax.set_title(title, fontweight='bold', fontsize=12)
            ax.set_ylim(0, 1.15)
            # Dashed 0.5 line marks chance level.
            ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
            ax.grid(True, alpha=0.3, axis='y')
            # Value label on each bar; unreliable categories marked in red
            # with a beta suffix. (Dropped an unused enumerate() index here.)
            for bar, cat in zip(bars, cats_with_overall):
                reliable = cv.get(cat, {}).get('reliable', True) if cat != 'overall' else True
                h = bar.get_height()
                ax.text(bar.get_x() + bar.get_width() / 2, h + 0.02,
                        f'{h:.2f}' + ('' if reliable else ' β'),
                        ha='center', va='bottom', fontsize=8,
                        color='red' if not reliable else 'black')

        fig.suptitle(f'{model_type.upper()} - Category Accuracy ({scale})',
                     fontsize=13, fontweight='bold')
        plt.tight_layout()
        out = os.path.join(save_dir, f'category_accuracy_{scale}.png')
        plt.savefig(out, dpi=200, bbox_inches='tight')
        plt.close()
        print(f" Saved {out}")
|
|
|
|
| |
|
|
def save_accuracy_group_bars(pred_stats, model_type, save_dir):
    """Write accuracy_group_bars.png: one grouped-bar panel per relation group."""
    fig, panel_axes = plt.subplots(1, 3, figsize=(21, 6))
    plot_group_bars(pred_stats, model_type, panel_axes)
    fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group',
                 fontsize=15, fontweight='bold')
    plt.tight_layout()
    target = os.path.join(save_dir, 'accuracy_group_bars.png')
    plt.savefig(target, dpi=200, bbox_inches='tight')
    plt.close()
    print(f" Saved {target}")
|
|
|
|
def save_accuracy_trajectory(pred_stats, model_type, save_dir):
    """Write accuracy_trajectory.png: both-correct and overall trajectory panels."""
    fig, (ax_both, ax_overall) = plt.subplots(1, 2, figsize=(16, 6))
    plot_both_trajectory(pred_stats, model_type, ax_both)
    plot_overall_trajectory(pred_stats, model_type, ax_overall)
    fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales',
                 fontsize=14, fontweight='bold')
    plt.tight_layout()
    target = os.path.join(save_dir, 'accuracy_trajectory.png')
    plt.savefig(target, dpi=200, bbox_inches='tight')
    plt.close()
    print(f" Saved {target}")
|
|
|
|
def save_accuracy_category(cat_validity, model_type, save_dir, pred_stats=None):
    """Write accuracy_category.png: per-category accuracy, orig and swap panels."""
    fig, (ax_left, ax_right) = plt.subplots(1, 2, figsize=(20, 6))
    plot_category_accuracy(cat_validity, model_type, ax_left, ax_right,
                           pred_stats=pred_stats)
    fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales',
                 fontsize=14, fontweight='bold')
    plt.tight_layout()
    target = os.path.join(save_dir, 'accuracy_category.png')
    plt.savefig(target, dpi=200, bbox_inches='tight')
    plt.close()
    print(f" Saved {target}")
|
|
|
|
def save_accuracy_chart(pred_stats, cat_validity, model_type, save_dir):
    """
    Combined summary figure (accuracy_chart.png):
      Row 1: group bars x3
      Row 2: both-correct trajectory | overall trajectory | notes panel
      Row 3: per-category accuracy split into orig | swap halves
    """
    fig = plt.figure(figsize=(24, 14))

    # Row 1: per-group bar panels.
    ax_h = fig.add_subplot(3, 3, 1)
    ax_v = fig.add_subplot(3, 3, 2)
    ax_d = fig.add_subplot(3, 3, 3)
    plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d])

    # Row 2: trajectory panels.
    ax_traj_both = fig.add_subplot(3, 3, 4)
    ax_traj_ovr = fig.add_subplot(3, 3, 5)
    plot_both_trajectory(pred_stats, model_type, ax_traj_both)
    plot_overall_trajectory(pred_stats, model_type, ax_traj_ovr)

    # Row 2, last cell: text-only legend explaining chart symbols.
    ax_note = fig.add_subplot(3, 3, 6)
    ax_note.axis('off')
    available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    note_lines = [f'Scales: {", ".join(available_scales)}',
                  '', 'β = unreliable category', '-- = 0.5 chance level']
    ax_note.text(0.1, 0.6, '\n'.join(note_lines), transform=ax_note.transAxes,
                 fontsize=11, va='top', family='monospace')

    # Row 3: per-category accuracy halves. (The original added a full-width
    # 3x1 subplot here and immediately removed it — a no-op create/remove
    # pair that has been dropped.)
    ax_co = fig.add_subplot(3, 2, 5)
    ax_cs = fig.add_subplot(3, 2, 6)
    plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats)

    # NOTE(review): 'β' in this title may be a mis-encoded dash rather than
    # an intentional beta — confirm against the other chart titles.
    fig.suptitle(f'{model_type.upper()} β Accuracy Summary',
                 fontsize=17, fontweight='bold', y=1.01)
    plt.tight_layout()
    out = os.path.join(save_dir, 'accuracy_chart.png')
    plt.savefig(out, dpi=200, bbox_inches='tight')
    plt.close()
    print(f" Saved {out}")
|
|
|
|
| |
|
|
def main():
    """Generate accuracy plots for every model directory under RESULTS_DIR."""
    if not os.path.isdir(RESULTS_DIR):
        print(f"Results directory not found: {RESULTS_DIR}")
        return

    for model in sorted(os.listdir(RESULTS_DIR)):
        model_dir = os.path.join(RESULTS_DIR, model)
        if not os.path.isdir(model_dir):
            continue

        json_dir = os.path.join(model_dir, 'json')
        if not os.path.isdir(json_dir):
            print(f"[{model}] no json/ dir, skipping")
            continue

        pred_stats = load_pred_stats(json_dir)
        cat_validity = load_category_validity(json_dir)

        if not pred_stats:
            print(f"[{model}] no pred_stats files found, skipping")
            continue

        # Canonical scale ordering; scales not in SCALE_ORDER sort last.
        def scale_rank(rec):
            scale = rec['scale']
            return SCALE_ORDER.index(scale) if scale in SCALE_ORDER else 99
        pred_stats.sort(key=scale_rank)

        save_dir = os.path.join(model_dir, 'plots', 'accuracy')
        os.makedirs(save_dir, exist_ok=True)

        print(f"\n[{model}] scales: {[d['scale'] for d in pred_stats]}")
        save_accuracy_group_bars(pred_stats, model, save_dir)
        save_accuracy_trajectory(pred_stats, model, save_dir)
        # Category plots need validity data; skip them when none was found.
        if cat_validity:
            save_accuracy_category(cat_validity, model, save_dir, pred_stats=pred_stats)
            plot_category_per_scale(cat_validity, model, save_dir, pred_stats=pred_stats)
        save_accuracy_chart(pred_stats, cat_validity, model, save_dir)

    print("\nDone.")
|
|
|
|
| if __name__ == '__main__': |
| main() |
|
|