""" Experiment 2-A: Image-conditioned Representation Analysis Goal: Verify Hypothesis 4 - that above/far and below/close are mapped to similar positions in embedding space, while left/right are well-separated. Method: 1. Load EmbSpatialBench data grouped by spatial relation category 2. Extract hidden states from VLM (Vanilla vs. Scaled) for each sample 3. Compute average representation per category 4. Calculate cosine similarity between category pairs 5. Compare: Vanilla model (expected confusion) vs. Scaled model (expected separation) Expected Results: - Vanilla: sim(above, far) > sim(left, right) and sim(below, close) > sim(left, right) - Scaled: The gap should decrease, indicating better separation Supported Models: - Molmo (native olmo checkpoint format) - NVILA (llava.load format) - Qwen2.5-VL (HuggingFace transformers format) """ import os import sys import json import argparse import base64 import logging from io import BytesIO from collections import defaultdict from typing import Dict, List, Tuple, Optional, Any from abc import ABC, abstractmethod import torch import numpy as np import pandas as pd from PIL import Image from tqdm import tqdm import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics.pairwise import cosine_similarity # Setup logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) # ============================================================================ # Data Loading # ============================================================================ def load_embspatial_data(tsv_path: str, samples_per_category: int = 50) -> Dict[str, List[dict]]: """ Load EmbSpatialBench data grouped by spatial relation category. Args: tsv_path: Path to EmbSpatialBench TSV file samples_per_category: Number of samples to use per category Returns: Dictionary mapping category -> list of samples """ df = pd.read_csv(tsv_path, sep='\t') # Group by category grouped_data = defaultdict(list) for _, row in df.iterrows(): category = row['category'] sample = { 'index': row['index'], 'image_base64': row['image'], 'question': row['question'], 'answer': row['answer'], 'category': category, 'options': { 'A': row['A'], 'B': row['B'], 'C': row['C'], 'D': row['D'] } } grouped_data[category].append(sample) # Limit samples per category for category in grouped_data: if len(grouped_data[category]) > samples_per_category: # Random sample indices = np.random.choice( len(grouped_data[category]), samples_per_category, replace=False ) grouped_data[category] = [grouped_data[category][i] for i in indices] logger.info(f"Loaded EmbSpatialBench data:") for cat, samples in grouped_data.items(): logger.info(f" {cat}: {len(samples)} samples") return dict(grouped_data) def decode_base64_image(base64_str: str) -> Image.Image: """Decode base64 string to PIL Image.""" image_data = base64.b64decode(base64_str) return Image.open(BytesIO(image_data)).convert('RGB') # ============================================================================ # Base Extractor Class # ============================================================================ class BaseHiddenStateExtractor(ABC): """Abstract base class for hidden state extraction.""" def __init__( self, model_path: str, device: str = 'cuda', target_layers: Optional[List[int]] = None ): self.model_path = model_path self.device = device self.hidden_states = {} self.hooks = [] logger.info(f"Loading model from {model_path}...") self._load_model() # Determine target layers num_layers 
# ============================================================================
# Base Extractor Class
# ============================================================================

class BaseHiddenStateExtractor(ABC):
    """Abstract base class for hidden state extraction."""

    def __init__(
        self,
        model_path: str,
        device: str = 'cuda',
        target_layers: Optional[List[int]] = None
    ):
        self.model_path = model_path
        self.device = device
        self.hidden_states = {}
        self.hooks = []

        logger.info(f"Loading model from {model_path}...")
        self._load_model()

        # Determine target layers
        num_layers = self._get_num_layers()
        if target_layers is None:
            # Default: use middle layer
            self.target_layers = [num_layers // 2]
        else:
            self.target_layers = target_layers

        logger.info(f"Model has {num_layers} layers. Target layers: {self.target_layers}")
        self._register_hooks()

    @abstractmethod
    def _load_model(self):
        """Load the model. To be implemented by subclasses."""
        pass

    @abstractmethod
    def _get_num_layers(self) -> int:
        """Get number of transformer layers."""
        pass

    @abstractmethod
    def _get_layer_module(self, layer_idx: int):
        """Get the module for a specific layer."""
        pass

    @abstractmethod
    def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
        """Extract hidden states for a single sample."""
        pass

    def _register_hooks(self):
        """Register forward hooks to capture hidden states."""
        def make_hook(layer_idx):
            def hook(module, input, output):
                # Handle different output types
                if isinstance(output, tuple):
                    hidden = output[0]
                else:
                    hidden = output
                # Pool over sequence dimension (take last token)
                if hidden.dim() == 3:
                    pooled = hidden[:, -1, :].detach().cpu().float()
                else:
                    pooled = hidden.detach().cpu().float()
                self.hidden_states[layer_idx] = pooled
                logger.debug(f"  Captured layer {layer_idx}: shape={pooled.shape}")
            return hook

        hooks_registered = 0
        for layer_idx in self.target_layers:
            try:
                module = self._get_layer_module(layer_idx)
                if module is not None:
                    hook = module.register_forward_hook(make_hook(layer_idx))
                    self.hooks.append(hook)
                    hooks_registered += 1
                    logger.info(f"  ✓ Registered hook on layer {layer_idx}")
                else:
                    logger.warning(f"  ✗ Layer {layer_idx} returned None")
            except Exception as e:
                logger.warning(f"  ✗ Failed to register hook on layer {layer_idx}: {e}")

        if hooks_registered == 0:
            raise ValueError(f"Failed to register any hooks! Target layers: {self.target_layers}")
        logger.info(f"Successfully registered {hooks_registered}/{len(self.target_layers)} hooks")

    def cleanup(self):
        """Remove hooks and free memory."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        if hasattr(self, 'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        torch.cuda.empty_cache()
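
# Alternative pooling strategy (a sketch, not wired in by default): the
# last-token pooling used in make_hook above can be dominated by the final
# prompt token. Mean pooling over the sequence is a common alternative; to
# compare, swap this helper into the hook body in place of the last-token slice.
def mean_pool_hidden(hidden: torch.Tensor) -> torch.Tensor:
    """Mean-pool a [batch, seq, dim] hidden state over the sequence dimension."""
    if hidden.dim() == 3:
        return hidden.mean(dim=1).detach().cpu().float()
    return hidden.detach().cpu().float()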
# ============================================================================
# Molmo Extractor (Native olmo format)
# ============================================================================

class MolmoExtractor(BaseHiddenStateExtractor):
    """Hidden state extractor for Molmo models (native olmo format)."""

    def _load_model(self):
        """Load Molmo model using native olmo library."""
        # Check for native checkpoint format
        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")

        if os.path.exists(config_path) and os.path.exists(checkpoint_path):
            self._load_native_model()
            self.is_native = True
        else:
            self._load_hf_model()
            self.is_native = False

    def _load_native_model(self):
        """Load native olmo checkpoint."""
        from olmo.config import ModelConfig
        from olmo.model import Molmo as NativeMolmoModel
        from olmo.data.model_preprocessor import MultiModalPreprocessor
        from olmo.data.data_formatter import DataFormatter

        # Work around the UnpicklingError raised by newer PyTorch versions,
        # whose torch.load defaults to weights_only=True: temporarily force
        # weights_only=False, then restore the original loader afterwards so
        # the patch does not leak globally.
        _original_load = torch.load

        def _unsafe_load_wrapper(*args, **kwargs):
            if 'weights_only' not in kwargs:
                kwargs['weights_only'] = False
            return _original_load(*args, **kwargs)

        torch.load = _unsafe_load_wrapper

        try:
            config_path = os.path.join(self.model_path, "config.yaml")
            checkpoint_path = os.path.join(self.model_path, "model.pt")

            cfg = ModelConfig.load(config_path, key="model", validate_paths=False)
            cfg.init_device = "cpu"

            self.model = NativeMolmoModel(cfg)
            state_dict = torch.load(checkpoint_path, map_location="cpu")
            self.model.load_state_dict(state_dict)
            self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()
        finally:
            torch.load = _original_load

        self.tokenizer = cfg.get_tokenizer()
        v_cfg = cfg.vision_backbone
        h, w = cfg.llm_patches_per_crop()
        image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)

        class SafeDataFormatter(DataFormatter):
            def get_system_prompt(self, style, for_inference, messages, rng=None):
                if style is None:
                    style = "User"
                return super().get_system_prompt(style, for_inference, messages, rng)

        self.formatter = SafeDataFormatter(
            prompt_templates=cfg.prompt_type,
            message_format=cfg.message_formatting,
            system_prompt=cfg.system_prompt_kind,
            always_start_with_space=cfg.always_start_with_space,
            default_inference_len=cfg.default_inference_len
        )

        self.preprocessor = MultiModalPreprocessor(
            tokenizer=self.tokenizer,
            normalize=str(v_cfg.image_model_type),
            crop_mode=cfg.crop_mode,
            max_crops=cfg.max_crops,
            overlap_margins=cfg.overlap_margins,
            resize=v_cfg.resize_mode,
            use_col_tokens=cfg.use_col_tokens,
            base_image_input_size=v_cfg.image_default_input_size,
            image_pooling_w=cfg.image_pooling_w,
            image_pooling_h=cfg.image_pooling_h,
            image_token_length_w=w,
            image_token_length_h=h,
            image_patch_size=v_cfg.image_patch_size,
            image_padding_mask=image_padding_mask,
            pad_value=cfg.pad_value,
            loss_token_weighting=cfg.multi_annotation_weighting,
        )

        logger.info(f"Loaded native Molmo model from {self.model_path}")

    def _load_hf_model(self):
        """Load HuggingFace format model."""
        from transformers import AutoModelForCausalLM, AutoProcessor

        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            device_map=self.device
        )
        self.model.eval()
        self.processor = AutoProcessor.from_pretrained(
            self.model_path,
            trust_remote_code=True
        )
        logger.info(f"Loaded HuggingFace Molmo model from {self.model_path}")

    def _get_num_layers(self) -> int:
        """Get number of transformer layers."""
        if self.is_native:
            return len(self.model.transformer.blocks)
        if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
            return len(self.model.model.transformer.blocks)
        return 32  # Default fallback

    def _get_layer_module(self, layer_idx: int):
        """Get the module for a specific layer."""
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        return self.model.model.transformer.blocks[layer_idx]

    def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
        """Extract hidden states for a single sample."""
        self.hidden_states = {}

        if self.is_native:
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            image_np = np.array(image)
            batch = self.preprocessor(image_np, messages, is_training=False, require_image_features=True)

            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_tensor(x):
                if isinstance(x, np.ndarray):
                    return torch.from_numpy(x)
                return x

            input_ids = to_tensor(batch['input_ids']).unsqueeze(0).to(self.device)
            if input_ids.dtype not in [torch.long, torch.int64]:
                input_ids = input_ids.long()
            images_tensor = to_tensor(batch['images']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
            image_masks = to_tensor(batch['image_masks']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
            image_input_idx = to_tensor(batch['image_input_idx']).unsqueeze(0).to(self.device)

            with torch.inference_mode():
                with torch.autocast(device_type="cuda", enabled=True, dtype=torch.bfloat16):
                    # Just do a forward pass to trigger the hooks
                    _ = self.model(
                        input_ids=input_ids,
                        images=images_tensor,
                        image_masks=image_masks,
                        image_input_idx=image_input_idx,
                    )
        else:
            inputs = self.processor.process(images=[image], text=question)
            # Cast float tensors to bfloat16 to match model dtype
            processed_inputs = {}
            for k, v in inputs.items():
                v = v.to(self.device).unsqueeze(0)
                if v.dtype == torch.float32:
                    v = v.to(dtype=torch.bfloat16)
                processed_inputs[k] = v
            with torch.no_grad():
                _ = self.model(**processed_inputs)

        return self.hidden_states.copy()
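# Note (an assumption from the import names): the `olmo` modules used by
# _load_native_model appear to come from the Molmo training codebase rather
# than PyPI, so that repository must be importable for the native-checkpoint
# path. The HuggingFace fallback only needs `transformers`.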
# ============================================================================
# NVILA Extractor
# ============================================================================

class NVILAExtractor(BaseHiddenStateExtractor):
    """Hidden state extractor for NVILA models."""

    def _load_model(self):
        """Load NVILA model using llava.load."""
        # Handle import conflicts with other checkouts that ship a `llava` package
        original_sys_path = sys.path.copy()
        sys.path = [p for p in sys.path if 'RoboRefer' not in p]

        modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
        removed_modules = {}
        for mod in modules_to_remove:
            removed_modules[mod] = sys.modules.pop(mod)

        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            sys.path = original_sys_path
            for mod, module in removed_modules.items():
                sys.modules[mod] = module
            raise RuntimeError(f"Failed to import llava: {err}")

        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib

        self.model = llava.load(self.model_path, model_base=None)

        # NVILA wraps the model; find the LLM backbone for hook registration
        self._find_llm_backbone()
        logger.info(f"Loaded NVILA model from {self.model_path}")

    def _find_llm_backbone(self):
        """Find the LLM backbone module for hook registration."""
        # NVILA structure varies between releases, so try multiple paths
        candidates = []

        # Path 1: model.llm.model.layers
        if hasattr(self.model, 'llm'):
            if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
                candidates.append(('model.llm.model.layers', self.model.llm.model.layers))
            if hasattr(self.model.llm, 'layers'):
                candidates.append(('model.llm.layers', self.model.llm.layers))

        # Path 2: model.model.model.layers
        if hasattr(self.model, 'model'):
            if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
                candidates.append(('model.model.model.layers', self.model.model.model.layers))
            if hasattr(self.model.model, 'layers'):
                candidates.append(('model.model.layers', self.model.model.layers))

        # Path 3: Search all named_modules
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
                candidates.append((name, module))

        if candidates:
            # Use the first valid candidate
            path, layers = candidates[0]
            logger.info(f"Found LLM layers at: {path} (num_layers={len(layers)})")
            self.llm_backbone = layers
            self.layers_path = path
        else:
            logger.error("Could not find transformer layers in model!")
            logger.info("Available modules:")
            for name, _ in list(self.model.named_modules())[:20]:
                logger.info(f"  {name}")
            raise ValueError("Could not locate transformer layers in NVILA model")

    def _get_num_layers(self) -> int:
        """Get number of transformer layers."""
        if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__len__'):
            return len(self.llm_backbone)
        return 24  # Default for NVILA-Lite-2B

    def _get_layer_module(self, layer_idx: int):
        """Get the module for a specific layer."""
        if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__getitem__'):
            module = self.llm_backbone[layer_idx]
            logger.info(f"  Accessing layer {layer_idx}: {type(module).__name__}")
            return module
        logger.error(f"Cannot access layer {layer_idx} - llm_backbone not properly initialized")
        return None

    def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
        """Extract hidden states for a single sample."""
        self.hidden_states = {}

        # Save image to temp file, since NVILA's API takes a path
        import tempfile
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
            temp_path = f.name
            image.save(temp_path)

        try:
            prompt = [self.LLaVAImage(temp_path), question]
            # Forward pass through generate to trigger hooks; one new token is
            # enough because the hooks fire on the prefill pass
            from transformers import GenerationConfig
            gen_config = GenerationConfig(max_new_tokens=1, do_sample=False)
            _ = self.model.generate_content(prompt, generation_config=gen_config)
        finally:
            os.unlink(temp_path)

        return self.hidden_states.copy()
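# Design note: NVILA is driven through its public generate_content API rather
# than a raw forward() call, since that API handles image loading and
# tokenization internally. With max_new_tokens=1 the hooks fire exactly once
# per target layer, during the prefill pass over the full prompt.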
# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================

class Qwen25VLExtractor(BaseHiddenStateExtractor):
    """Hidden state extractor for Qwen2.5-VL models."""

    # Base model for loading the processor (has chat_template)
    BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"

    def _load_model(self):
        """Load Qwen2.5-VL model."""
        from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

        # Try with device_map first, fall back to manual .to() if accelerate not available
        try:
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map=self.device
            )
        except ImportError:
            logger.info("accelerate not available, loading model without device_map...")
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
            )
            self.model = self.model.to(self.device)
        self.model.eval()

        # For fine-tuned models (local paths), load the processor from the base
        # model, because fine-tuned checkpoints may not ship a chat_template
        if self.model_path.startswith('/'):
            logger.info(f"Fine-tuned model detected, loading processor from base model: {self.BASE_MODEL}")
            self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL)
        else:
            self.processor = AutoProcessor.from_pretrained(self.model_path)

        logger.info(f"Loaded Qwen2.5-VL model from {self.model_path}")

    def _get_num_layers(self) -> int:
        """Get number of transformer layers."""
        return len(self.model.model.layers)

    def _get_layer_module(self, layer_idx: int):
        """Get the module for a specific layer."""
        return self.model.model.layers[layer_idx]

    def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
        """Extract hidden states for a single sample."""
        self.hidden_states = {}

        # Build message format
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": question}
                ]
            }
        ]

        # Render the chat template
        text = self.processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        # Process image
        from qwen_vl_utils import process_vision_info
        image_inputs, video_inputs = process_vision_info(messages)

        inputs = self.processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt"
        )
        inputs = inputs.to(self.device)

        with torch.no_grad():
            _ = self.model(**inputs)

        return self.hidden_states.copy()
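# Note: `process_vision_info` (imported inside extract above) comes from the
# external `qwen-vl-utils` package, which Qwen distributes alongside
# Qwen2.5-VL for turning chat messages into model-ready vision inputs.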
# ============================================================================
# Factory Function
# ============================================================================

def get_extractor(model_type: str, model_path: str, **kwargs) -> BaseHiddenStateExtractor:
    """Factory function to create the appropriate extractor."""
    extractors = {
        'molmo': MolmoExtractor,
        'nvila': NVILAExtractor,
        'qwen': Qwen25VLExtractor,
    }
    if model_type not in extractors:
        raise ValueError(f"Unknown model type: {model_type}. Available: {list(extractors.keys())}")
    return extractors[model_type](model_path, **kwargs)
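
# Usage sketch (hypothetical layer index; requires the corresponding weights
# and a CUDA device):
#
#     extractor = get_extractor('qwen', 'Qwen/Qwen2.5-VL-3B-Instruct',
#                               device='cuda', target_layers=[14])
#     states = extractor.extract(image, 'Is the cup left of the plate?')
#     # -> {14: tensor of shape [1, hidden_dim]} (last-token pooled)
#     extractor.cleanup()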
# ============================================================================
# Analysis Functions
# ============================================================================

def extract_category_representations(
    extractor: BaseHiddenStateExtractor,
    data: Dict[str, List[dict]],
    layer_idx: int
) -> Dict[str, np.ndarray]:
    """Extract the average hidden-state representation per category."""
    category_states = defaultdict(list)

    for category, samples in data.items():
        logger.info(f"Processing category: {category}")
        success_count = 0
        for sample in tqdm(samples, desc=f"  {category}"):
            try:
                image = decode_base64_image(sample['image_base64'])
                hidden_states = extractor.extract(image, sample['question'])
                if layer_idx in hidden_states:
                    state = hidden_states[layer_idx].numpy().flatten()
                    if state.size > 0:  # Check that the state is not empty
                        category_states[category].append(state)
                        success_count += 1
                else:
                    logger.warning(f"  Layer {layer_idx} not in hidden_states. Available: {list(hidden_states.keys())}")
            except Exception as e:
                logger.warning(f"  Error processing sample {sample['index']}: {e}")
                continue
        logger.info(f"  {category}: Successfully extracted {success_count}/{len(samples)} samples")

    # Average per category
    category_avg = {}
    for category, states in category_states.items():
        if states:
            category_avg[category] = np.mean(states, axis=0)
            logger.info(f"  {category}: {len(states)} samples, dim={category_avg[category].shape}")
        else:
            logger.error(f"  {category}: No states collected!")

    if not category_avg:
        raise ValueError("No representations were extracted! Check hook registration and model forward pass.")

    return category_avg


def compute_similarity_matrix(
    representations: Dict[str, np.ndarray]
) -> pd.DataFrame:
    """Compute pairwise cosine similarity between category representations."""
    categories = sorted(representations.keys())
    vectors = np.array([representations[cat] for cat in categories])
    sim_matrix = cosine_similarity(vectors)
    return pd.DataFrame(sim_matrix, index=categories, columns=categories)


def analyze_hypothesis(sim_df: pd.DataFrame, model_name: str) -> dict:
    """Analyze the similarity matrix to test Hypothesis 4."""
    results = {'model': model_name}

    pairs_to_check = {
        'above_far': ('above', 'far'),
        'under_close': ('under', 'close'),
        'left_right': ('left', 'right'),
    }

    for pair_name, (cat1, cat2) in pairs_to_check.items():
        if cat1 in sim_df.index and cat2 in sim_df.columns:
            sim = sim_df.loc[cat1, cat2]
            results[f'sim_{pair_name}'] = sim
            logger.info(f"  {pair_name}: sim({cat1}, {cat2}) = {sim:.4f}")
        else:
            logger.warning(f"  {cat1} or {cat2} not found in similarity matrix")
            results[f'sim_{pair_name}'] = None

    # Calculate differences (explicit None checks, so a similarity of 0.0 is not skipped)
    if results.get('sim_above_far') is not None and results.get('sim_left_right') is not None:
        results['diff_above_far_vs_left_right'] = results['sim_above_far'] - results['sim_left_right']
    if results.get('sim_under_close') is not None and results.get('sim_left_right') is not None:
        results['diff_under_close_vs_left_right'] = results['sim_under_close'] - results['sim_left_right']

    return results


# ============================================================================
# Visualization
# ============================================================================

def plot_similarity_heatmap(sim_df: pd.DataFrame, title: str, save_path: str):
    """Plot and save similarity heatmap."""
    plt.figure(figsize=(10, 8))
    category_order = ['left', 'right', 'above', 'far', 'under', 'close']
    available_order = [c for c in category_order if c in sim_df.index]
    sim_df_ordered = sim_df.loc[available_order, available_order]

    sns.heatmap(
        sim_df_ordered,
        annot=True,
        fmt='.3f',
        cmap='RdYlBu_r',
        center=0.5,
        vmin=0,
        vmax=1,
        square=True,
        linewidths=0.5,
        cbar_kws={'label': 'Cosine Similarity'}
    )
    plt.title(title, fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved heatmap: {save_path}")


def plot_comparison(results_list: List[dict], save_path: str):
    """Plot comparison of similarity pairs across models."""
    pairs = ['sim_above_far', 'sim_under_close', 'sim_left_right']
    pair_labels = ['above-far', 'under-close', 'left-right']

    fig, ax = plt.subplots(figsize=(12, 6))
    x = np.arange(len(pairs))
    width = 0.8 / len(results_list)

    for i, result in enumerate(results_list):
        model = result['model']
        values = [result.get(p, 0) or 0 for p in pairs]
        offset = (i - len(results_list) / 2 + 0.5) * width
        bars = ax.bar(x + offset, values, width, label=model)
        for bar, val in zip(bars, values):
            if val:
                ax.annotate(
                    f'{val:.3f}',
                    xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                    xytext=(0, 3),
                    textcoords='offset points',
                    ha='center', va='bottom', fontsize=8
                )

    ax.set_ylabel('Cosine Similarity')
    ax.set_title('Spatial Concept Similarity Comparison\n'
                 '(Hypothesis 4: above-far & under-close should be > left-right for vanilla)')
    ax.set_xticks(x)
    ax.set_xticklabels(pair_labels)
    ax.legend(loc='upper right', fontsize=8)
    ax.set_ylim(0, 1)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved comparison plot: {save_path}")
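
# Optional self-test of the analysis path (a sketch; runnable without any
# model and not called by default). Synthetic category vectors are built so
# that above/far and under/close are nearly collinear while left/right are
# independent, mimicking the vanilla-model prediction of Hypothesis 4.
def _demo_analysis_on_synthetic_vectors() -> None:
    rng = np.random.default_rng(0)
    base = rng.normal(size=64)
    reps = {
        'above': base + 0.05 * rng.normal(size=64),
        'far': base + 0.05 * rng.normal(size=64),     # near-duplicate of 'above'
        'under': -base + 0.05 * rng.normal(size=64),
        'close': -base + 0.05 * rng.normal(size=64),  # near-duplicate of 'under'
        'left': rng.normal(size=64),
        'right': rng.normal(size=64),
    }
    sim_df = compute_similarity_matrix(reps)
    print(sim_df.round(3))
    # Expect sim(above, far) and sim(under, close) near 1, sim(left, right) near 0
    print(analyze_hypothesis(sim_df, 'synthetic'))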
# ============================================================================
# Model Configurations
# ============================================================================

MODEL_CONFIGS = {
    'molmo': {
        'vanilla': 'allenai/Molmo-7B-O-0924',
        '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared',
        '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared',
        '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared',
        '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared',
    },
    'nvila': {
        'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B',
        '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221',
        '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221',
        '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221',
        '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632',
    },
    'qwen': {
        'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct',
        '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221',
        '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221',
        '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221',
        '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517',
    },
}
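# Note: the local checkpoint paths above are environment-specific. main()
# skips any entry whose path does not exist on disk; HF Hub IDs ('Qwen/...',
# 'allenai/...') are exempt from that existence check and are assumed to be
# downloadable.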
# ============================================================================
# Main
# ============================================================================

def main():
    parser = argparse.ArgumentParser(description='Experiment 2-A: Embedding Space Analysis')
    parser.add_argument('--data_path', type=str,
                        default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv',
                        help='Path to EmbSpatialBench TSV file')
    parser.add_argument('--model_type', type=str, required=True,
                        choices=['molmo', 'nvila', 'qwen'],
                        help='Model type to analyze')
    parser.add_argument('--scales', type=str, nargs='+',
                        default=['vanilla', '80k', '400k', '800k', '2m'],
                        help='Model scales to analyze')
    parser.add_argument('--output_dir', type=str,
                        default='/data/shared/Qwen/experiments/exp2a_results',
                        help='Output directory')
    parser.add_argument('--samples_per_category', type=int, default=50,
                        help='Number of samples per category')
    parser.add_argument('--layer_idx', type=int, default=None,
                        help='Layer index to analyze (default: middle layer)')
    parser.add_argument('--device', type=str, default='cuda', help='Device to use')
    parser.add_argument('--seed', type=int, default=42, help='Random seed')
    args = parser.parse_args()

    # Set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Create output directory
    output_dir = os.path.join(args.output_dir, args.model_type)
    os.makedirs(output_dir, exist_ok=True)

    # Load data
    logger.info("\n=== Loading EmbSpatialBench Data ===")
    data = load_embspatial_data(args.data_path, args.samples_per_category)

    results_list = []
    model_configs = MODEL_CONFIGS[args.model_type]

    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale {scale} not available for {args.model_type}, skipping...")
            continue

        model_path = model_configs[scale]

        # Check that local paths exist (HF Hub IDs are exempt)
        if (not os.path.exists(model_path)
                and not model_path.startswith('Qwen/')
                and not model_path.startswith('allenai/')):
            logger.warning(f"Model path not found: {model_path}, skipping...")
            continue

        logger.info(f"\n=== Processing {args.model_type} - {scale} ===")
        logger.info(f"Model path: {model_path}")

        try:
            # Determine target layers
            target_layers = [args.layer_idx] if args.layer_idx is not None else None

            extractor = get_extractor(
                args.model_type,
                model_path,
                device=args.device,
                target_layers=target_layers
            )

            # Use the actual layer index
            layer_idx = extractor.target_layers[0]

            # Extract representations
            reps = extract_category_representations(extractor, data, layer_idx)
            sim_df = compute_similarity_matrix(reps)

            logger.info(f"\n--- {scale} Model Similarity Matrix ---")
            logger.info(f"\n{sim_df.round(3)}")

            # Analyze and save
            model_name = f"{args.model_type}_{scale}"
            results = analyze_hypothesis(sim_df, model_name)
            results_list.append(results)

            # Save heatmap
            plot_similarity_heatmap(
                sim_df,
                f'Spatial Concept Similarity - {args.model_type.upper()} ({scale})',
                os.path.join(output_dir, f'heatmap_{scale}.png')
            )

            # Save similarity matrix
            sim_df.to_csv(os.path.join(output_dir, f'similarity_{scale}.csv'))

            # Cleanup
            extractor.cleanup()

        except Exception as e:
            logger.error(f"Failed to process {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    # Plot comparison
    if len(results_list) > 1:
        plot_comparison(results_list, os.path.join(output_dir, 'comparison.png'))

    # Save results summary
    if results_list:
        results_df = pd.DataFrame(results_list)
        results_df.to_csv(os.path.join(output_dir, 'results_summary.csv'), index=False)

    logger.info("\n=== Analysis Complete ===")
    logger.info(f"Results saved to: {output_dir}")

    # Print summary
    logger.info("\n--- Summary ---")
    for result in results_list:
        logger.info(f"\n{result['model']}:")
        for key, val in result.items():
            if key != 'model' and val is not None:
                logger.info(f"  {key}: {val:.4f}")


if __name__ == '__main__':
    main()
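
# Example invocation (script name and layer index are illustrative):
#
#   python exp2a_representation_analysis.py --model_type qwen \
#       --scales vanilla 80k --samples_per_category 50 --layer_idx 14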