| |
| """ |
| Counter vs Consistent Example Analysis Script |
| |
| 2D Heuristic (shared across datasets): |
| Upper part of image (small y) = farther from camera |
| Lower part of image (large y) = closer to camera |
| |
| Datasets: |
| embspatial (default): |
| FAR/CLOSE questions in EmbSpatial-Bench |
| Consistent: GT answer agrees with the 2D heuristic (Height-Depth Entanglement) |
| Counter: GT answer contradicts the 2D heuristic |
| |
| cvbench3d: |
| Depth questions: "Which object is closer to the camera?" |
| Consistent: GT object (closer) has larger center_y (lower in image) |
| Counter: GT object (closer) has smaller center_y (higher in image) |
| Distance questions: "Which object is closer to [reference]?" |
| 2D heuristic: smaller pixel distance to reference = closer in 3D |
| Consistent: GT candidate has smaller 2D pixel distance to reference |
| Counter: GT candidate has larger 2D pixel distance to reference |
| |
| Usage: |
| python experiments/analyze_counter_consistent.py <model_result.xlsx> [--verbose] |
| python experiments/analyze_counter_consistent.py --compare <file1.xlsx> <file2.xlsx> ... |
| python experiments/analyze_counter_consistent.py --dataset cvbench3d <result.xlsx> |
| python experiments/analyze_counter_consistent.py --dataset cvbench3d --compare <file1.xlsx> ... |
| """ |
|
|
| import argparse |
| import ast |
| import pandas as pd |
| import numpy as np |
| from datasets import load_dataset |
| from pathlib import Path |
| from typing import Dict, List, Tuple, Optional |
| import json |
| import sys |
|
|
|
|
class TeeWriter:
    """Mirror everything written to stdout into a log file as well.

    Install with ``sys.stdout = TeeWriter(path)``; ``close()`` closes the
    log file and returns the original stdout so the caller can restore it.
    """

    def __init__(self, filepath):
        self.terminal = sys.stdout  # real stdout, handed back by close()
        self.file = open(filepath, 'w', encoding='utf-8')

    def write(self, message):
        # Fan the message out to both sinks; flush the file eagerly so the
        # log stays complete even if the process dies mid-run.
        for sink in (self.terminal, self.file):
            sink.write(message)
        self.file.flush()

    def flush(self):
        for sink in (self.terminal, self.file):
            sink.flush()

    def close(self):
        # Close the log file and return the original stdout stream.
        self.file.close()
        return self.terminal
|
|
|
|
| |
| |
| |
|
|
def get_bbox_center_y(bbox: List[int], source: str = None) -> float:
    """
    Return the vertical center of a bounding box.

    The bbox layout depends on the data source:
        ScanNet / MP3D : [x1, y1, w, h ] -> y1 + h/2
        AI2Thor        : [x1, y1, x2, y2] -> (y1 + y2) / 2
    """
    top, last = bbox[1], bbox[3]
    if source == 'ai2thor':
        # last is y2 (bottom edge): midpoint of the two y coordinates.
        return (top + last) / 2
    # last is the box height: offset half of it from the top edge.
    return top + last / 2
|
|
|
|
def classify_sample(relation: str, objects: Dict, gt_answer_idx: int,
                    answer_options: List[str] = None,
                    image_height: int = None, threshold_ratio: float = 0.05,
                    data_source: str = None) -> Tuple[str, Dict]:
    """
    Classify a sample as Consistent / Counter / Ambiguous.

    2D heuristic: objects lower in the image (larger center y) are assumed
    closer to the camera, so a 'close' GT should sit lower than the
    alternatives and a 'far' GT should sit higher.

    Args:
        relation: 'far' or 'close'
        objects: {'bbox': [...], 'name': [...]}
        gt_answer_idx: GT answer index (0-based, relative to answer_options)
        answer_options: list of answer choices (used to match bbox by name)
        image_height: image height for threshold normalization (pass PIL image.size[1])
        threshold_ratio: ambiguous decision threshold as a fraction of image height
        data_source: 'scannet' | 'mp3d' | 'ai2thor' (selects bbox format)

    Returns:
        classification: 'consistent', 'counter', or 'ambiguous' (or a
            sentinel such as 'not_applicable' / 'insufficient_objects' /
            'unknown_object' / 'index_out_of_range' with empty details)
        details: dict with classification details; values are plain Python
            floats so the cache built from them is JSON-serializable
            (main's --save-cache runs json.dump on it)
    """
    if relation not in ['far', 'close']:
        return 'not_applicable', {}

    bboxes = objects['bbox']
    names = objects['name']

    # Need at least one alternative object to compare against.
    if len(bboxes) < 2:
        return 'insufficient_objects', {}

    # Remap the answer index from answer_options space into bbox/name space:
    # the options may list object names in a different order.
    if answer_options is not None and gt_answer_idx < len(answer_options):
        gt_answer_name = answer_options[gt_answer_idx]
        if gt_answer_name in names:
            gt_answer_idx = names.index(gt_answer_name)
        elif gt_answer_name == 'Unknown' or gt_answer_idx >= len(bboxes):
            return 'unknown_object', {}

    if gt_answer_idx >= len(bboxes):
        return 'index_out_of_range', {}

    center_ys = [get_bbox_center_y(bbox, source=data_source) for bbox in bboxes]

    gt_center_y = center_ys[gt_answer_idx]

    # Average y of all non-GT objects. Use a builtin mean rather than
    # np.mean: np.mean returns np.float64, which leaks into the details
    # dict and makes json.dump fail when the cache is saved.
    other_ys = [y for i, y in enumerate(center_ys) if i != gt_answer_idx]
    other_avg_y = sum(other_ys) / len(other_ys)

    y_diff = gt_center_y - other_avg_y

    # Ambiguity threshold: a fraction of the image height when known,
    # otherwise a fixed 20-pixel fallback.
    if image_height:
        threshold = image_height * threshold_ratio
    else:
        threshold = 20

    details = {
        'gt_object': names[gt_answer_idx],
        'gt_center_y': gt_center_y,
        'other_avg_y': other_avg_y,
        'y_diff': y_diff,
        'threshold': threshold,
        'all_objects': list(zip(names, center_ys))
    }

    if abs(y_diff) < threshold:
        return 'ambiguous', details

    if relation == 'far':
        # 'far' objects should sit higher in the image (smaller y).
        return ('consistent' if gt_center_y < other_avg_y else 'counter'), details

    # relation == 'close': 'close' objects should sit lower (larger y).
    return ('consistent' if gt_center_y > other_avg_y else 'counter'), details
|
|
|
|
def get_image_height_by_source(data_source: str) -> int:
    """Return a fallback image height (px) by data source, used when the PIL image is unavailable."""
    # Known frame heights per simulator/dataset; 480 covers anything else.
    return {
        'ai2thor': 300,
        'mp3d': 480,
        'scannet': 968,
    }.get(data_source, 480)
|
|
|
|
def build_classification_cache(verbose: bool = False) -> Dict[str, Dict]:
    """
    Build a counter/consistent classification cache for the full
    EmbSpatial-Bench dataset, keyed by question_id.
    """
    print("Loading EmbSpatial-Bench dataset...")
    ds = load_dataset('FlagEval/EmbSpatial-Bench', split='test')

    cache = {}
    stats = {rel: {'consistent': 0, 'counter': 0, 'ambiguous': 0}
             for rel in ('far', 'close')}

    for item in ds:
        qid = item['question_id']
        relation = item['relation']

        # Only FAR/CLOSE questions participate in the height-depth analysis.
        if relation not in ['far', 'close']:
            cache[qid] = {'classification': 'not_applicable', 'relation': relation}
            continue

        source = item['data_source']

        # Prefer the real image height; fall back to a per-source default.
        pil_image = item.get('image')
        if pil_image is not None and hasattr(pil_image, 'size'):
            height = pil_image.size[1]
        else:
            height = get_image_height_by_source(source)

        classification, details = classify_sample(
            relation, item['objects'], item['answer'], item['answer_options'],
            height, data_source=source
        )

        cache[qid] = {
            'classification': classification,
            'relation': relation,
            'data_source': source,
            'details': details
        }

        if relation in stats and classification in stats[relation]:
            stats[relation][classification] += 1

    if verbose:
        print("\n=== Classification Statistics ===")
        for rel in ['far', 'close']:
            total = sum(stats[rel].values())
            print(f"\n{rel.upper()} (n={total}):")
            for cls, cnt in stats[rel].items():
                pct = cnt / total * 100 if total > 0 else 0
                print(f" {cls}: {cnt} ({pct:.1f}%)")

    return cache
|
|
|
|
def analyze_embspatial_results(xlsx_path: str, cache: Dict[str, Dict],
                               verbose: bool = False) -> Tuple[Dict, List[Dict]]:
    """Analyze a model result xlsx file against the EmbSpatialBench classification cache."""
    df = pd.read_excel(xlsx_path)

    # Per-relation accuracy buckets for each classification type.
    results = {
        rel: {cls: {'correct': 0, 'total': 0}
              for cls in ('consistent', 'counter', 'ambiguous')}
        for rel in ('far', 'close')
    }

    counter_examples = []

    for _, row in df.iterrows():
        qid = row['question_id']
        category = row['category']
        hit = row['hit']

        # Skip rows outside the far/close relations or missing from the cache.
        if category not in ['far', 'close'] or qid not in cache:
            continue

        info = cache[qid]
        classification = info['classification']
        if classification not in ['consistent', 'counter', 'ambiguous']:
            continue

        bucket = results[category][classification]
        bucket['total'] += 1
        if hit == 1:
            bucket['correct'] += 1

        # Keep every counter example for downstream inspection.
        if classification == 'counter':
            counter_examples.append({
                'question_id': qid,
                'relation': category,
                'hit': hit,
                'prediction': row['prediction'],
                'answer': row['answer'],
                'data_source': info['data_source'],
                'details': info.get('details', {})
            })

    return results, counter_examples
|
|
|
|
| |
| |
| |
|
|
| |
| |
# Known frame heights (px) for CV-Bench-3D source datasets; used to scale
# the ambiguity threshold when classifying Depth questions.
_CVBENCH3D_SOURCE_HEIGHTS = {
    'Omni3D_Hypersim': 768,
    'Omni3D_nuScenes': 900,
}
|
|
|
|
def classify_cvbench3d_row(row, depth_threshold_ratio: float = 0.05) -> Tuple[str, Dict]:
    """
    Classify a single CV-Bench-3D row as consistent / counter / ambiguous.

    Only Depth questions are classified — they share the same height-depth
    entanglement heuristic as EmbSpatial-Bench:
        2D heuristic: lower in image (larger center_y) = closer to camera
        Consistent:   GT object (closer to camera) has larger center_y
        Counter:      GT object (closer to camera) has smaller center_y

    Distance questions ask "which object is closer to [reference] in 3D real-world
    distance?" — this is inter-object 3D distance, not viewer distance. No
    equivalent 2D projection heuristic exists (height-depth entanglement does not
    apply), so Distance rows are always marked 'not_applicable'.

    Args:
        row: result-sheet row (pandas Series or mapping) with 'category',
            'answer', 'bbox' (stringified [[x1, y1, x2, y2], ...]) and
            optionally 'source_dataset'.
        depth_threshold_ratio: ambiguous decision threshold as a fraction of
            the (known or estimated) image height.

    Returns:
        (classification, details) where classification is 'consistent',
        'counter', 'ambiguous', 'not_applicable', or 'invalid_bbox'.
    """
    category = row['category']
    answer_letter = str(row['answer']).strip()

    if category != 'Depth':
        return 'not_applicable', {}

    # TypeError covers non-string cells that ast.literal_eval rejects
    # outright (e.g. a NaN float from a sparse sheet on some Python
    # versions) — treat them the same as malformed strings.
    try:
        bbox_list = ast.literal_eval(row['bbox'])
    except (ValueError, SyntaxError, TypeError):
        return 'invalid_bbox', {}

    # Expect exactly two boxes of at least [x1, y1, x2, y2]; reject anything
    # else (a bare number, one box, short boxes) instead of crashing on the
    # len()/indexing below.
    if (not isinstance(bbox_list, (list, tuple)) or len(bbox_list) != 2
            or any(not isinstance(bb, (list, tuple)) or len(bb) < 4 for bb in bbox_list)):
        return 'invalid_bbox', {}

    # Vertical centers of object A and object B ([x1, y1, x2, y2] layout).
    cy_A = (bbox_list[0][1] + bbox_list[0][3]) / 2
    cy_B = (bbox_list[1][1] + bbox_list[1][3]) / 2

    gt_y = cy_A if answer_letter == 'A' else cy_B
    other_y = cy_B if answer_letter == 'A' else cy_A
    y_diff = gt_y - other_y

    # Image height: prefer the known per-source frame height; otherwise use
    # the largest bottom edge among the boxes as a lower-bound estimate.
    source_dataset = str(row.get('source_dataset', ''))
    known_h = _CVBENCH3D_SOURCE_HEIGHTS.get(source_dataset, 0)
    est_h = max(bb[3] for bb in bbox_list)
    image_height = max(known_h, est_h)
    threshold = image_height * depth_threshold_ratio

    details = {
        'answer': answer_letter,
        'center_y_A': cy_A,
        'center_y_B': cy_B,
        'y_diff': y_diff,
        'threshold': threshold,
        'image_height_est': image_height,
        'source_dataset': source_dataset,
    }

    if abs(y_diff) < threshold:
        return 'ambiguous', details

    return ('consistent' if gt_y > other_y else 'counter'), details
|
|
|
|
def analyze_cvbench3d_results(xlsx_path: str, verbose: bool = False,
                              depth_threshold_ratio: float = 0.05) -> Tuple[Dict, List[Dict]]:
    """
    Analyze a CV-Bench-3D result xlsx file.

    Only the Depth category is classified into consistent / counter / ambiguous,
    because it shares the height-depth entanglement heuristic with EmbSpatial-Bench.
    Distance (inter-object 3D distance) has no analogous 2D projection heuristic
    and is excluded from the consistent/counter analysis.
    """
    df = pd.read_excel(xlsx_path)

    classes = ['consistent', 'counter', 'ambiguous']
    results = {'Depth': {cls: {'correct': 0, 'total': 0} for cls in classes}}
    counter_examples = []

    for _, row in df.iterrows():
        if row['category'] != 'Depth':
            continue

        hit = row['hit']
        classification, details = classify_cvbench3d_row(row, depth_threshold_ratio)
        if classification not in classes:
            continue

        bucket = results['Depth'][classification]
        bucket['total'] += 1
        if hit == 1:
            bucket['correct'] += 1

        # Keep every counter example for downstream inspection.
        if classification == 'counter':
            counter_examples.append({
                'index': row['index'],
                'category': row['category'],
                'hit': hit,
                'prediction': row['prediction'],
                'answer': row['answer'],
                'source_dataset': row.get('source_dataset', ''),
                'details': details,
            })

    if verbose:
        print("\n=== CV-Bench-3D Depth Classification Statistics ===")
        total = sum(results['Depth'][c]['total'] for c in classes)
        print(f"Depth (n={total}):")
        for cls in classes:
            n = results['Depth'][cls]['total']
            pct = n / total * 100 if total > 0 else 0
            print(f" {cls}: {n} ({pct:.1f}%)")
        print("(Distance excluded: no 2D heuristic applies for inter-object 3D distance)")

    return results, counter_examples
|
|
|
|
| |
| |
| |
|
|
# Filename suffixes appended by the evaluation pipeline to each model's
# result xlsx, keyed by dataset; extract_model_name strips these to
# recover the bare model name.
_XLSX_SUFFIXES = {
    'embspatial': [
        '_EmbSpatialBench_openai_result',
        '_EmbSpatialBench_exact_matching_result',
    ],
    'cvbench3d': [
        '_CV-Bench-3D_chatgpt-0125_result',
        '_CV-Bench-3D_exact_matching_result',
    ],
}
|
|
|
|
def extract_model_name(xlsx_path: str, dataset: str) -> str:
    """Derive the model name from a result filename by stripping the dataset's known suffixes."""
    name = Path(xlsx_path).stem
    for suffix in _XLSX_SUFFIXES.get(dataset, []):
        name = name.replace(suffix, '')
    return name
|
|
|
|
def print_analysis_report(xlsx_path: str, results: Dict, counter_examples: List[Dict],
                          dataset: str) -> Dict:
    """Print analysis report for a single model (works for any dataset).

    Returns a summary dict (model name, consistent/counter accuracy and
    totals) consumed by compare_models for the side-by-side table.
    """
    model_name = extract_model_name(xlsx_path, dataset)

    print(f"\n{'='*70}")
    print(f"Model: {model_name}")
    print(f"{'='*70}")

    print(f"\n{'Category':<12} {'Type':<12} {'Correct':<10} {'Total':<10} {'Accuracy':<10}")
    print("-" * 54)

    # Aggregate consistent/counter across categories for the summary rows.
    totals = {'consistent': {'correct': 0, 'total': 0},
              'counter': {'correct': 0, 'total': 0}}

    for category, by_type in results.items():
        for cls_type in ['consistent', 'counter', 'ambiguous']:
            data = by_type[cls_type]
            if data['total'] > 0:
                acc = data['correct'] / data['total'] * 100
                print(f"{category:<12} {cls_type:<12} {data['correct']:<10} {data['total']:<10} {acc:.1f}%")
            if cls_type in totals:
                totals[cls_type]['correct'] += data['correct']
                totals[cls_type]['total'] += data['total']

    print("-" * 54)
    for cls_type in ('consistent', 'counter'):
        agg = totals[cls_type]
        if agg['total'] > 0:
            acc = agg['correct'] / agg['total'] * 100
            print(f"{'TOTAL':<12} {cls_type:<12} {agg['correct']:<10} {agg['total']:<10} {acc:.1f}%")

    total_consistent = totals['consistent']
    total_counter = totals['counter']

    # The accuracy gap is the headline metric: how much better the model does
    # when the 2D heuristic happens to agree with the 3D ground truth.
    if total_consistent['total'] > 0 and total_counter['total'] > 0:
        consistent_acc = total_consistent['correct'] / total_consistent['total'] * 100
        counter_acc = total_counter['correct'] / total_counter['total'] * 100
        gap = consistent_acc - counter_acc
        print(f"\nAccuracy Gap (Consistent - Counter): {gap:.1f}%p")
        print(f" -> Larger gap indicates stronger reliance on the 2D heuristic")

    counter_wrong = [ex for ex in counter_examples if ex['hit'] == 0]
    if len(counter_wrong) > 0:
        print(f"\n🔍 Counter examples wrong: {len(counter_wrong)} / {len(counter_examples)}")

    return {
        'model_name': model_name,
        'consistent_acc': total_consistent['correct'] / total_consistent['total'] * 100 if total_consistent['total'] > 0 else 0,
        'counter_acc': total_counter['correct'] / total_counter['total'] * 100 if total_counter['total'] > 0 else 0,
        'consistent_total': total_consistent['total'],
        'counter_total': total_counter['total'],
    }
|
|
|
|
def _run_analysis(xlsx_path: str, dataset: str, cache: Optional[Dict] = None,
                  verbose: bool = False,
                  depth_threshold_ratio: float = 0.05) -> Tuple[Dict, List[Dict]]:
    """Dispatch to the dataset-specific analyzer (cache is only used for embspatial)."""
    if dataset == 'cvbench3d':
        return analyze_cvbench3d_results(xlsx_path, verbose=verbose,
                                         depth_threshold_ratio=depth_threshold_ratio)
    return analyze_embspatial_results(xlsx_path, cache, verbose=verbose)
|
|
|
|
def compare_models(xlsx_paths: List[str], dataset: str, cache: Optional[Dict] = None):
    """Analyze each model's result file, then print a side-by-side comparison table."""
    summaries = []
    for path in xlsx_paths:
        results, counter_examples = _run_analysis(path, dataset, cache)
        summaries.append(print_analysis_report(path, results, counter_examples, dataset))

    # Size the model-name column to fit the longest name (minimum width 40).
    col_w = max(max(len(s['model_name']) for s in summaries) + 2, 40)
    total_w = col_w + 12 + 12 + 10
    print(f"\n{'='*total_w}")
    print("MODEL COMPARISON")
    print(f"{'='*total_w}")
    print(f"{'Model':<{col_w}} {'Consistent':<12} {'Counter':<12} {'Gap':<10}")
    print("-" * total_w)

    for s in summaries:
        gap = s['consistent_acc'] - s['counter_acc']
        print(f"{s['model_name']:<{col_w}} {s['consistent_acc']:.1f}%{'':<6} {s['counter_acc']:.1f}%{'':<6} {gap:+.1f}%p")
|
|
|
|
# Root directory where the evaluation pipeline writes per-model result spreadsheets.
EVAL_OUTPUT_DIR = 'VLMEvalKit/outputs'


# Default '<subdir>/<model>' entries analyzed when no xlsx files are passed on
# the command line (see get_default_xlsx_paths for the filename pattern).
DEFAULT_MODELS = [
    # Molmo-7B baseline and data-scaling checkpoints
    'molmo-7B-O-0924/molmo-7B-O-0924',
    'molmo-7B-O-0924-data_scale_exp_80k/molmo-7B-O-0924-data_scale_exp_80k',
    'molmo-7B-O-0924-data_scale_exp_400k/molmo-7B-O-0924-data_scale_exp_400k',
    'molmo-7B-O-0924-data_scale_exp_800k/molmo-7B-O-0924-data_scale_exp_800k',
    'molmo-7B-O-0924-data_scale_exp_2m/molmo-7B-O-0924-data_scale_exp_2m',
    # NVILA-Lite-2B baseline, data-scaling, ST variants, and RoboRefer SFT
    'NVILA-Lite-2B/NVILA-Lite-2B',
    'NVILA-Lite-2B-data-scale-exp-80k/NVILA-Lite-2B-data-scale-exp-80k',
    'NVILA-Lite-2B-data-scale-exp-400k/NVILA-Lite-2B-data-scale-exp-400k',
    'NVILA-Lite-2B-data-scale-exp-800k/NVILA-Lite-2B-data-scale-exp-800k',
    'NVILA-Lite-2B-data-scale-exp-2m/NVILA-Lite-2B-data-scale-exp-2m',
    'NVILA-Lite-2B-ST-80k-5pct/NVILA-Lite-2B-ST-80k-5pct',
    'NVILA-Lite-2B-ST-400k-5pct/NVILA-Lite-2B-ST-400k-5pct',
    'NVILA-Lite-2B-ST-800k-5pct/NVILA-Lite-2B-ST-800k-5pct',
    'RoboRefer-2B-SFT/RoboRefer-2B-SFT',
    # Qwen baselines and data-scaling checkpoints
    'Qwen2.5-VL-3B-Instruct/Qwen2.5-VL-3B-Instruct',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_80k/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_400k/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_800k/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k',
    'Qwen2.5-VL-3B-Instruct-data_scale_exp_2m/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m',
    'Qwen3-VL-235B-A22B-Instruct/Qwen3-VL-235B-A22B-Instruct'
]
|
|
|
|
def get_default_xlsx_paths(dataset: str) -> List[str]:
    """Build the default result-xlsx path for every model in DEFAULT_MODELS."""
    suffix = ('_CV-Bench-3D_chatgpt-0125_result.xlsx' if dataset == 'cvbench3d'
              else '_EmbSpatialBench_openai_result.xlsx')
    return [f'{EVAL_OUTPUT_DIR}/{m}{suffix}' for m in DEFAULT_MODELS]
|
|
|
|
def main():
    """CLI entry point: parse args, build/load the cache, then analyze or compare models."""
    parser = argparse.ArgumentParser(description='Counter vs Consistent Example Analysis')
    parser.add_argument('xlsx_files', nargs='*',
                        help='Model result xlsx files (uses default model list if omitted)')
    parser.add_argument('--dataset', choices=['embspatial', 'cvbench3d'], default='embspatial',
                        help='Benchmark dataset to analyze (default: embspatial)')
    parser.add_argument('--compare', action='store_true', help='Compare multiple models')
    parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
    parser.add_argument('--output', '-o', type=str, help='Save results to file')
    parser.add_argument('--save-cache', type=str,
                        help='Save EmbSpatialBench classification cache to JSON')
    parser.add_argument('--load-cache', type=str,
                        help='Load EmbSpatialBench classification cache from JSON')

    args = parser.parse_args()

    # The classification cache is only needed for embspatial; cvbench3d rows
    # are classified directly from the result sheet.
    cache = None
    if args.dataset == 'embspatial':
        if args.load_cache and Path(args.load_cache).exists():
            print(f"Loading cache from {args.load_cache}...")
            with open(args.load_cache, 'r') as f:
                cache = json.load(f)
        else:
            # No usable cache on disk: classify the full dataset now.
            cache = build_classification_cache(verbose=args.verbose)

        if args.save_cache:
            print(f"Saving cache to {args.save_cache}...")
            with open(args.save_cache, 'w') as f:
                json.dump(cache, f, indent=2)

    xlsx_files = args.xlsx_files if args.xlsx_files else get_default_xlsx_paths(args.dataset)

    # Optionally tee all stdout into the output file for the rest of the run.
    tee = None
    if args.output:
        tee = TeeWriter(args.output)
        sys.stdout = tee

    try:
        if args.compare or len(xlsx_files) > 1:
            compare_models(xlsx_files, args.dataset, cache)
        else:
            results, counter_examples = _run_analysis(
                xlsx_files[0], args.dataset, cache, args.verbose
            )
            print_analysis_report(xlsx_files[0], results, counter_examples, args.dataset)
    finally:
        # Restore the real stdout before printing the final status message.
        if tee is not None:
            sys.stdout = tee.close()
            print(f"Results saved to {args.output}")
|
|
|
|
# Script entry point.
if __name__ == '__main__':
    main()
|
|