"""
Experiment 2-A (Modified): Image-conditioned Representation Analysis
Modifications from the original:
- Remove task format confound by unifying answer format
- All answers are pure spatial concepts: left, right, above, under, far, close
- Pairwise: "Is the {obj1} to the left or right of the {obj2}?" -> "left"
- Distance: "Compared to {ref}, is {target} far or close from you?" -> "far"
- 200 samples per category (up from 50)
Goal: Test Hypothesis 4: that above/far and under/close are mapped to similar
positions in embedding space, while left/right are well separated.
"""
import os
import sys
import json
import argparse
import base64
import logging
import random
import re
from io import BytesIO
from collections import defaultdict
from typing import Dict, List, Tuple, Optional
from abc import ABC, abstractmethod
import torch
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics.pairwise import cosine_similarity
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Category order for output
CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close']
# Pair definitions for trajectory analysis
TRAJECTORY_PAIRS = {
'hypothesis': [
('above', 'far', 'above-far', '#d62728'), # red
('under', 'close', 'under-close', '#1f77b4'), # blue
],
'within_axis': [
('left', 'right', 'left-right', '#2ca02c'), # green
('above', 'under', 'above-under', '#ff7f0e'), # orange
('far', 'close', 'far-close', '#9467bd'), # purple
],
'counter_hypothesis': [
('above', 'close', 'above-close', '#e377c2'), # pink
('under', 'far', 'under-far', '#17becf'), # cyan
],
}
# Scale colors for cross-scale plots
SCALE_COLORS = {
'vanilla': '#1f77b4',
'80k': '#ff7f0e',
'400k': '#2ca02c',
'800k': '#d62728',
'2m': '#9467bd',
'roborefer': '#8c564b',
}
# ============================================================================
# Data Loading & Modification
# ============================================================================
# Regex patterns for extracting objects from pairwise questions
OBJECT_PATTERNS = [
re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]
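# Example: the first pattern matches phrasings like
#   "What is the spatial relation between the red chair and the lamp in the image?"
# capturing ("the red chair", "the lamp"); the object names here are illustrative.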
def extract_objects(question: str) -> Tuple[str, str]:
"""Extract two objects from a pairwise relation question."""
for pattern in OBJECT_PATTERNS:
m = pattern.search(question)
if m:
return m.group(1).strip(), m.group(2).strip()
raise ValueError(f"Could not extract objects from: {question}")
def modify_pairwise_sample(sample: dict) -> dict:
"""Modify a pairwise relation sample (left/right/above/under)."""
obj1, obj2 = extract_objects(sample['question'])
category = sample['category']
if category in ['left', 'right']:
new_question = f"Is the {obj1} to the left or right of the {obj2}?"
else: # above, under
new_question = f"Is the {obj1} above or under the {obj2}?"
return {
'index': sample['index'],
'image_base64': sample['image_base64'],
'question': new_question,
'answer': category,
'category': category,
}
def modify_distance_sample(sample: dict, rng: random.Random) -> dict:
"""Modify a distance relation sample (far/close)."""
category = sample['category']
answer_key = sample['answer'] # e.g. "C"
options = sample['options'] # {'A': 'table', 'B': 'towel', ...}
target_object = options[answer_key]
candidates = [v for k, v in options.items() if k != answer_key]
reference_object = rng.choice(candidates)
new_question = f"Compared to {reference_object}, is {target_object} far or close from you?"
return {
'index': sample['index'],
'image_base64': sample['image_base64'],
'question': new_question,
'answer': category,
'category': category,
}
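# e.g. answer_key='C', options={'A': 'table', 'B': 'towel', 'C': 'sofa', 'D': 'rug'}
# -> target 'sofa', with the reference drawn at random from the other three
# options (object names here are illustrative).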
def load_and_modify_data(
tsv_path: str,
samples_per_category: int = 200,
seed: int = 42
) -> Dict[str, List[dict]]:
"""
Load EmbSpatialBench data, modify questions to remove format confound.
"""
rng = random.Random(seed)
np.random.seed(seed)
df = pd.read_csv(tsv_path, sep='\t')
# Group by category
raw_grouped = defaultdict(list)
for _, row in df.iterrows():
category = row['category']
sample = {
'index': row['index'],
'image_base64': row['image'],
'question': row['question'],
'answer': row['answer'],
'category': category,
'options': {
'A': row['A'],
'B': row['B'],
'C': row['C'],
'D': row['D']
}
}
raw_grouped[category].append(sample)
# Sample and modify
modified_data = defaultdict(list)
stats = {'total': 0, 'success': 0, 'failed': 0}
for category in CATEGORY_ORDER:
samples = raw_grouped[category]
# Sample up to samples_per_category
if len(samples) > samples_per_category:
indices = np.random.choice(len(samples), samples_per_category, replace=False)
samples = [samples[i] for i in indices]
for sample in samples:
stats['total'] += 1
try:
if category in ['left', 'right', 'above', 'under']:
modified = modify_pairwise_sample(sample)
else: # far, close
modified = modify_distance_sample(sample, rng)
# Validate
assert modified['answer'] == modified['category']
modified_data[category].append(modified)
stats['success'] += 1
except Exception as e:
stats['failed'] += 1
logger.warning(f" Failed to modify sample {sample['index']}: {e}")
logger.info(f"Data modification: {stats['success']}/{stats['total']} success, {stats['failed']} failed")
for cat in CATEGORY_ORDER:
if cat in modified_data:
logger.info(f" {cat}: {len(modified_data[cat])} samples")
# Show first example
ex = modified_data[cat][0]
logger.info(f" Example Q: {ex['question']}")
logger.info(f" Example A: {ex['answer']}")
return dict(modified_data)
def decode_base64_image(base64_str: str) -> Image.Image:
"""Decode base64 string to PIL Image."""
image_data = base64.b64decode(base64_str)
return Image.open(BytesIO(image_data)).convert('RGB')
# ============================================================================
# Base Extractor
# ============================================================================
class BaseHiddenStateExtractor(ABC):
"""Base class for extracting hidden states from VLMs."""
    def __init__(self, model_path: str, device: str = 'cuda', target_layers: Optional[List[int]] = None):
self.model_path = model_path
self.device = device
self.hidden_states = {}
self.hooks = []
self._load_model()
num_layers = self._get_num_layers()
if target_layers is None:
self.target_layers = list(range(num_layers))
logger.info(f"Model has {num_layers} layers. Extracting ALL layers (0..{num_layers-1})")
else:
self.target_layers = target_layers
logger.info(f"Model has {num_layers} layers. Target layers: {self.target_layers}")
self._register_hooks()
def _register_hooks(self):
"""Register forward hooks on target layers."""
for layer_idx in self.target_layers:
module = self._get_layer_module(layer_idx)
if module is not None:
hook = module.register_forward_hook(self._make_hook(layer_idx))
self.hooks.append(hook)
logger.info(f" Registered hook on layer {layer_idx}")
def _make_hook(self, layer_idx: int):
"""Create a hook function for a specific layer."""
def hook_fn(module, input, output):
if isinstance(output, tuple):
hidden = output[0]
else:
hidden = output
# Last token pooling
last_token = hidden[:, -1, :].detach().cpu().float()
self.hidden_states[layer_idx] = last_token.squeeze(0)
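            # Assumes batch size 1 (extract() runs one sample per forward pass),
            # so squeeze(0) leaves a flat (hidden_dim,) vector per layer.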
return hook_fn
@abstractmethod
def _load_model(self):
pass
@abstractmethod
def _get_num_layers(self) -> int:
pass
@abstractmethod
def _get_layer_module(self, layer_idx: int):
pass
@abstractmethod
def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
pass
def cleanup(self):
"""Remove hooks and free memory."""
for hook in self.hooks:
hook.remove()
self.hooks = []
if hasattr(self, 'model'):
del self.model
if hasattr(self, 'processor'):
del self.processor
torch.cuda.empty_cache()
# ============================================================================
# Molmo Extractor
# ============================================================================
class MolmoExtractor(BaseHiddenStateExtractor):
"""Hidden state extractor for Molmo models (native olmo format)."""
def _load_model(self):
config_path = os.path.join(self.model_path, "config.yaml")
checkpoint_path = os.path.join(self.model_path, "model.pt")
if os.path.exists(config_path) and os.path.exists(checkpoint_path):
self._load_native_model()
self.is_native = True
else:
self._load_hf_model()
self.is_native = False
def _load_native_model(self):
from olmo.config import ModelConfig
from olmo.model import Molmo as NativeMolmoModel
from olmo.data.model_preprocessor import MultiModalPreprocessor
from olmo.data.data_formatter import DataFormatter
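        # PyTorch 2.6 changed torch.load's default to weights_only=True, which
        # can reject the pickled objects stored in native checkpoints, so wrap
        # torch.load with a weights_only=False default. NOTE: this patch is
        # process-global and is not restored afterwards.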
_original_load = torch.load
def _unsafe_load_wrapper(*args, **kwargs):
if 'weights_only' not in kwargs:
kwargs['weights_only'] = False
return _original_load(*args, **kwargs)
torch.load = _unsafe_load_wrapper
config_path = os.path.join(self.model_path, "config.yaml")
checkpoint_path = os.path.join(self.model_path, "model.pt")
cfg = ModelConfig.load(config_path, key="model", validate_paths=False)
cfg.init_device = "cpu"
self.model = NativeMolmoModel(cfg)
state_dict = torch.load(checkpoint_path, map_location="cpu")
self.model.load_state_dict(state_dict)
self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()
self.tokenizer = cfg.get_tokenizer()
v_cfg = cfg.vision_backbone
h, w = cfg.llm_patches_per_crop()
image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)
class SafeDataFormatter(DataFormatter):
def get_system_prompt(self, style, for_inference, messages, rng=None):
if style is None:
style = "User"
return super().get_system_prompt(style, for_inference, messages, rng)
self.formatter = SafeDataFormatter(
prompt_templates=cfg.prompt_type,
message_format=cfg.message_formatting,
system_prompt=cfg.system_prompt_kind,
always_start_with_space=cfg.always_start_with_space,
default_inference_len=cfg.default_inference_len
)
self.preprocessor = MultiModalPreprocessor(
tokenizer=self.tokenizer,
normalize=str(v_cfg.image_model_type),
crop_mode=cfg.crop_mode,
max_crops=cfg.max_crops,
overlap_margins=cfg.overlap_margins,
resize=v_cfg.resize_mode,
use_col_tokens=cfg.use_col_tokens,
base_image_input_size=v_cfg.image_default_input_size,
image_pooling_w=cfg.image_pooling_w,
image_pooling_h=cfg.image_pooling_h,
image_token_length_w=w,
image_token_length_h=h,
image_patch_size=v_cfg.image_patch_size,
image_padding_mask=image_padding_mask,
pad_value=cfg.pad_value,
loss_token_weighting=cfg.multi_annotation_weighting,
)
logger.info(f"Loaded native Molmo model from {self.model_path}")
def _load_hf_model(self):
from transformers import AutoModelForCausalLM, AutoProcessor
self.model = AutoModelForCausalLM.from_pretrained(
self.model_path,
torch_dtype=torch.bfloat16,
trust_remote_code=True,
device_map=self.device
)
self.model.eval()
self.processor = AutoProcessor.from_pretrained(
self.model_path,
trust_remote_code=True
)
logger.info(f"Loaded HuggingFace Molmo model from {self.model_path}")
def _get_num_layers(self) -> int:
if self.is_native:
return len(self.model.transformer.blocks)
else:
if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
return len(self.model.model.transformer.blocks)
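            # Fallback when the module layout is unrecognized (assumes a
            # Molmo-7B-class decoder with 32 blocks).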
return 32
def _get_layer_module(self, layer_idx: int):
if self.is_native:
return self.model.transformer.blocks[layer_idx]
else:
return self.model.model.transformer.blocks[layer_idx]
def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
self.hidden_states = {}
if self.is_native:
example = {"messages": [question], "image": image}
messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
image_np = np.array(image)
batch = self.preprocessor(image_np, messages, is_training=False, require_image_features=True)
if 'input_ids' not in batch and 'input_tokens' in batch:
batch['input_ids'] = batch['input_tokens']
def to_tensor(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x)
return x
input_ids = to_tensor(batch['input_ids']).unsqueeze(0).to(self.device)
            if input_ids.dtype != torch.long:  # torch.long is an alias of torch.int64
input_ids = input_ids.long()
images_tensor = to_tensor(batch['images']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
image_masks = to_tensor(batch['image_masks']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
image_input_idx = to_tensor(batch['image_input_idx']).unsqueeze(0).to(self.device)
with torch.inference_mode():
with torch.autocast(device_type="cuda", enabled=True, dtype=torch.bfloat16):
_ = self.model(
input_ids=input_ids,
images=images_tensor,
image_masks=image_masks,
image_input_idx=image_input_idx,
)
else:
inputs = self.processor.process(images=[image], text=question)
processed_inputs = {}
for k, v in inputs.items():
v = v.to(self.device).unsqueeze(0)
if v.dtype == torch.float32:
v = v.to(dtype=torch.bfloat16)
processed_inputs[k] = v
with torch.no_grad():
_ = self.model(**processed_inputs)
return self.hidden_states.copy()
# ============================================================================
# NVILA Extractor
# ============================================================================
class NVILAExtractor(BaseHiddenStateExtractor):
"""Hidden state extractor for NVILA models."""
def _load_model(self):
original_sys_path = sys.path.copy()
sys.path = [p for p in sys.path if 'RoboRefer' not in p]
modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
removed_modules = {}
for mod in modules_to_remove:
removed_modules[mod] = sys.modules.pop(mod)
try:
import llava
from llava.media import Image as LLaVAImage
from llava import conversation as clib
except Exception as err:
sys.path = original_sys_path
for mod, module in removed_modules.items():
sys.modules[mod] = module
raise RuntimeError(f"Failed to import llava: {err}")
sys.path = original_sys_path
self.LLaVAImage = LLaVAImage
self.clib = clib
self.model = llava.load(self.model_path, model_base=None)
self._find_llm_backbone()
logger.info(f"Loaded NVILA model from {self.model_path}")
def _find_llm_backbone(self):
"""Find the LLM backbone module for hook registration."""
candidates = []
if hasattr(self.model, 'llm'):
if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
candidates.append(('model.llm.model.layers', self.model.llm.model.layers))
if hasattr(self.model.llm, 'layers'):
candidates.append(('model.llm.layers', self.model.llm.layers))
if hasattr(self.model, 'model'):
if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
candidates.append(('model.model.model.layers', self.model.model.model.layers))
if hasattr(self.model.model, 'layers'):
candidates.append(('model.model.layers', self.model.model.layers))
for name, module in self.model.named_modules():
if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
candidates.append((name, module))
if candidates:
path, layers = candidates[0]
logger.info(f"Found LLM layers at: {path} (num_layers={len(layers)})")
self.llm_backbone = layers
self.layers_path = path
else:
logger.error("Could not find transformer layers in model!")
for name, _ in list(self.model.named_modules())[:20]:
logger.info(f" {name}")
raise ValueError("Could not locate transformer layers in NVILA model")
def _get_num_layers(self) -> int:
if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__len__'):
return len(self.llm_backbone)
return 24
def _get_layer_module(self, layer_idx: int):
if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__getitem__'):
module = self.llm_backbone[layer_idx]
logger.info(f" Accessing layer {layer_idx}: {type(module).__name__}")
return module
logger.error(f"Cannot access layer {layer_idx} - llm_backbone not properly initialized")
return None
def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
self.hidden_states = {}
import tempfile
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
temp_path = f.name
image.save(temp_path)
try:
prompt = [self.LLaVAImage(temp_path), question]
from transformers import GenerationConfig
gen_config = GenerationConfig(max_new_tokens=1, do_sample=False)
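            # max_new_tokens=1 forces a single prefill forward pass; the layer
            # hooks capture the last prompt token's hidden state during it.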
_ = self.model.generate_content(prompt, generation_config=gen_config)
finally:
os.unlink(temp_path)
return self.hidden_states.copy()
# ============================================================================
# RoboRefer Extractor (NVILA-based)
# ============================================================================
class RoboReferExtractor(NVILAExtractor):
"""Hidden state extractor for RoboRefer models (NVILA-based, different llava path)."""
ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'
def _load_model(self):
original_sys_path = sys.path.copy()
# Add RoboRefer path (opposite of NVILA which removes it)
if self.ROBOREFER_PATH not in sys.path:
sys.path.insert(0, self.ROBOREFER_PATH)
# Clear any existing llava modules to avoid conflicts
modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
removed_modules = {}
for mod in modules_to_remove:
removed_modules[mod] = sys.modules.pop(mod)
try:
import llava
from llava.media import Image as LLaVAImage
from llava import conversation as clib
except Exception as err:
sys.path = original_sys_path
for mod, module in removed_modules.items():
sys.modules[mod] = module
raise RuntimeError(f"Failed to import RoboRefer llava: {err}")
sys.path = original_sys_path
self.LLaVAImage = LLaVAImage
self.clib = clib
self.model = llava.load(self.model_path, model_base=None)
self._find_llm_backbone()
logger.info(f"Loaded RoboRefer model from {self.model_path}")
# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================
class Qwen25VLExtractor(BaseHiddenStateExtractor):
"""Hidden state extractor for Qwen2.5-VL models."""
BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"
def _load_model(self):
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
try:
self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
self.model_path,
torch_dtype=torch.bfloat16,
device_map=self.device
)
except ImportError:
logger.info("accelerate not available, loading model without device_map...")
self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
self.model_path,
torch_dtype=torch.bfloat16,
)
self.model = self.model.to(self.device)
self.model.eval()
if self.model_path.startswith('/'):
logger.info(f"Fine-tuned model detected, loading processor from base model: {self.BASE_MODEL}")
self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL)
else:
self.processor = AutoProcessor.from_pretrained(self.model_path)
logger.info(f"Loaded Qwen2.5-VL model from {self.model_path}")
def _get_num_layers(self) -> int:
return len(self.model.model.layers)
def _get_layer_module(self, layer_idx: int):
return self.model.model.layers[layer_idx]
def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
self.hidden_states = {}
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": question}
]
}
]
text = self.processor.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
from qwen_vl_utils import process_vision_info
image_inputs, video_inputs = process_vision_info(messages)
inputs = self.processor(
text=[text],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt"
)
inputs = inputs.to(self.device)
with torch.no_grad():
_ = self.model(**inputs)
return self.hidden_states.copy()
# ============================================================================
# Factory Function
# ============================================================================
def get_extractor(model_type: str, model_path: str, scale: Optional[str] = None, **kwargs) -> BaseHiddenStateExtractor:
    """Instantiate the hidden-state extractor for model_type (scale selects RoboRefer for nvila)."""
    # RoboRefer uses the NVILA architecture but needs a different llava import path
if model_type == 'nvila' and scale == 'roborefer':
return RoboReferExtractor(model_path, **kwargs)
extractors = {
'molmo': MolmoExtractor,
'nvila': NVILAExtractor,
'qwen': Qwen25VLExtractor,
}
if model_type not in extractors:
raise ValueError(f"Unknown model type: {model_type}. Available: {list(extractors.keys())}")
return extractors[model_type](model_path, **kwargs)
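# Usage sketch (hub ID shown; local checkpoint paths work the same way):
#   extractor = get_extractor('qwen', 'Qwen/Qwen2.5-VL-3B-Instruct', device='cuda')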
# ============================================================================
# Analysis Functions
# ============================================================================
def extract_all_layer_representations(
extractor: BaseHiddenStateExtractor,
data: Dict[str, List[dict]],
) -> Dict[int, Dict[str, np.ndarray]]:
"""Extract average hidden state representations for ALL target layers at once.
Returns:
Dict mapping layer_idx -> {category -> avg_vector}
"""
# category_states[layer_idx][category] = list of vectors
category_states = defaultdict(lambda: defaultdict(list))
for category in CATEGORY_ORDER:
if category not in data:
continue
samples = data[category]
logger.info(f"Processing category: {category}")
success_count = 0
for sample in tqdm(samples, desc=f" {category}"):
try:
image = decode_base64_image(sample['image_base64'])
hidden_states = extractor.extract(image, sample['question'])
for layer_idx in extractor.target_layers:
if layer_idx in hidden_states:
state = hidden_states[layer_idx].numpy().flatten()
if state.size > 0:
category_states[layer_idx][category].append(state)
if any(l in hidden_states for l in extractor.target_layers):
success_count += 1
else:
logger.warning(f" No target layers found. Available: {list(hidden_states.keys())}")
except Exception as e:
logger.warning(f" Error processing sample {sample['index']}: {e}")
continue
logger.info(f" {category}: Successfully extracted {success_count}/{len(samples)} samples")
# Average per category per layer
result = {}
for layer_idx in extractor.target_layers:
category_avg = {}
for category, states in category_states[layer_idx].items():
if states:
category_avg[category] = np.mean(states, axis=0)
if category_avg:
result[layer_idx] = category_avg
logger.info(f" Layer {layer_idx}: {len(category_avg)} categories collected")
else:
logger.error(f" Layer {layer_idx}: No states collected!")
if not result:
raise ValueError("No representations were extracted!")
return result
def compute_similarity_matrix(
representations: Dict[str, np.ndarray]
) -> pd.DataFrame:
"""Compute pairwise cosine similarity with fixed category order."""
available = [c for c in CATEGORY_ORDER if c in representations]
vectors = np.array([representations[cat] for cat in available])
sim_matrix = cosine_similarity(vectors)
return pd.DataFrame(sim_matrix, index=available, columns=available)
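# e.g. compute_similarity_matrix(reps).loc['above', 'far'] gives the cosine
# similarity between the mean 'above' and mean 'far' hidden-state vectors.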
def analyze_hypothesis(sim_df: pd.DataFrame, model_name: str) -> dict:
"""Analyze the similarity matrix to test Hypothesis 4."""
results = {'model': model_name}
pairs_to_check = {
'above_far': ('above', 'far'),
'under_close': ('under', 'close'),
'left_right': ('left', 'right'),
}
for pair_name, (cat1, cat2) in pairs_to_check.items():
if cat1 in sim_df.index and cat2 in sim_df.columns:
sim = sim_df.loc[cat1, cat2]
results[f'sim_{pair_name}'] = sim
logger.info(f" {pair_name}: sim({cat1}, {cat2}) = {sim:.4f}")
else:
results[f'sim_{pair_name}'] = None
    if results.get('sim_above_far') is not None and results.get('sim_left_right') is not None:
        results['diff_above_far_vs_left_right'] = results['sim_above_far'] - results['sim_left_right']
    if results.get('sim_under_close') is not None and results.get('sim_left_right') is not None:
        results['diff_under_close_vs_left_right'] = results['sim_under_close'] - results['sim_left_right']
return results
# ============================================================================
# Visualization
# ============================================================================
def plot_similarity_heatmap(sim_df: pd.DataFrame, title: str, save_path: str):
"""Plot and save similarity heatmap with fixed category order."""
plt.figure(figsize=(10, 8))
available_order = [c for c in CATEGORY_ORDER if c in sim_df.index]
sim_df_ordered = sim_df.loc[available_order, available_order]
sns.heatmap(
sim_df_ordered,
annot=True,
fmt='.4f',
cmap='RdYlBu_r',
center=0.5,
vmin=0,
vmax=1,
square=True,
linewidths=0.5,
cbar_kws={'label': 'Cosine Similarity'}
)
plt.title(title, fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved heatmap: {save_path}")
def plot_comparison(results_list: List[dict], save_path: str):
"""Plot comparison of similarity pairs across models."""
pairs = ['sim_above_far', 'sim_under_close', 'sim_left_right']
pair_labels = ['above-far', 'under-close', 'left-right']
fig, ax = plt.subplots(figsize=(12, 6))
x = np.arange(len(pairs))
width = 0.8 / len(results_list)
for i, result in enumerate(results_list):
model = result['model']
        values = [result.get(p, 0) or 0 for p in pairs]  # treat None/missing as 0 so the bar renders empty
offset = (i - len(results_list) / 2 + 0.5) * width
bars = ax.bar(x + offset, values, width, label=model)
for bar, val in zip(bars, values):
if val:
ax.annotate(
f'{val:.3f}',
xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
xytext=(0, 3),
textcoords='offset points',
ha='center',
va='bottom',
fontsize=8
)
ax.set_ylabel('Cosine Similarity')
ax.set_title('Spatial Concept Similarity Comparison (Modified Format)\n(Hypothesis 4: above-far & under-close should be > left-right for vanilla)')
ax.set_xticks(x)
ax.set_xticklabels(pair_labels)
ax.legend(loc='upper right', fontsize=8)
ax.set_ylim(0, 1)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved comparison plot: {save_path}")
def _extract_pair_trajectory(
all_layer_sims: Dict[int, pd.DataFrame],
cat1: str, cat2: str,
) -> Tuple[List[int], List[float]]:
"""Extract similarity values for a pair across all layers."""
layers = sorted(all_layer_sims.keys())
valid_layers = []
values = []
for l in layers:
df = all_layer_sims[l]
if cat1 in df.index and cat2 in df.columns:
valid_layers.append(l)
values.append(df.loc[cat1, cat2])
return valid_layers, values
def get_representative_layers(all_layers: List[int], n: int = 5) -> List[int]:
"""Pick n representative layers (evenly spaced) for heatmap output."""
if len(all_layers) <= n:
return list(all_layers)
indices = np.linspace(0, len(all_layers) - 1, n, dtype=int)
return [all_layers[i] for i in indices]
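# e.g. for a 28-layer model: get_representative_layers(list(range(28))) -> [0, 6, 13, 20, 27]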
def plot_similarity_trajectories(
all_layer_sims: Dict[int, pd.DataFrame],
title: str,
save_path: str,
):
"""Plot similarity of key category pairs across all layers.
Left panel: absolute cosine similarity per pair across layers.
Right panel: difference from left-right baseline (positive = more similar than L-R).
"""
fig, axes = plt.subplots(1, 2, figsize=(20, 7))
# --- Left panel: absolute similarity ---
ax = axes[0]
for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']:
layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
ax.plot(layers, vals, '-', color=color, label=label, linewidth=2.5, markersize=0)
for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']:
layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
ax.plot(layers, vals, '--', color=color, label=label, linewidth=1.8, markersize=0)
for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']:
layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
ax.plot(layers, vals, ':', color=color, label=label, linewidth=1.5, alpha=0.8)
ax.set_xlabel('Layer Index', fontsize=12)
ax.set_ylabel('Cosine Similarity', fontsize=12)
ax.set_title(f'{title}\nPairwise Similarity Across Layers', fontsize=13)
ax.legend(fontsize=9, loc='best')
ax.grid(True, alpha=0.3)
# --- Right panel: difference from left-right ---
ax = axes[1]
lr_layers, lr_vals = _extract_pair_trajectory(all_layer_sims, 'left', 'right')
lr_dict = dict(zip(lr_layers, lr_vals))
for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']:
layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)]
ax.plot(layers, diffs, '-', color=color, label=f'{label} - left-right',
linewidth=2.5, markersize=0)
for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']:
layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)]
ax.plot(layers, diffs, ':', color=color, label=f'{label} - left-right',
linewidth=1.5, alpha=0.8)
# Also show above-under and far-close as references
for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']:
if label == 'left-right':
continue
layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)]
ax.plot(layers, diffs, '--', color=color, label=f'{label} - left-right',
linewidth=1.5, alpha=0.7)
ax.axhline(y=0, color='gray', linestyle='-', linewidth=1, alpha=0.5)
ax.set_xlabel('Layer Index', fontsize=12)
ax.set_ylabel('Similarity Difference (pair - left-right)', fontsize=12)
ax.set_title(f'{title}\nRelative to Left-Right Baseline', fontsize=13)
ax.legend(fontsize=8, loc='best')
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved trajectory plot: {save_path}")
def plot_cross_scale_trajectories(
cross_scale_data: Dict[str, Dict[int, pd.DataFrame]],
model_type: str,
save_path: str,
):
"""Compare layer-wise trajectories across training scales.
3 columns: above-far, under-close, left-right (control).
Each subplot shows one line per scale.
"""
pairs = [
('above', 'far', 'above-far (hypothesis)'),
('under', 'close', 'under-close (hypothesis)'),
('left', 'right', 'left-right (control)'),
]
fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6))
if len(pairs) == 1:
axes = [axes]
for idx, (cat1, cat2, label) in enumerate(pairs):
ax = axes[idx]
for scale in ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']:
if scale not in cross_scale_data:
continue
layer_sims = cross_scale_data[scale]
layers, vals = _extract_pair_trajectory(layer_sims, cat1, cat2)
color = SCALE_COLORS.get(scale, 'gray')
ax.plot(layers, vals, '-', color=color, label=scale, linewidth=2, markersize=0)
ax.set_xlabel('Layer Index', fontsize=12)
ax.set_ylabel('Cosine Similarity', fontsize=12)
ax.set_title(label, fontsize=13, fontweight='bold')
ax.legend(fontsize=10)
ax.grid(True, alpha=0.3)
fig.suptitle(
f'{model_type.upper()} - Similarity Trajectory Across Scales',
fontsize=15, fontweight='bold', y=1.02
)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved cross-scale trajectory: {save_path}")
def plot_similarity_evolution_heatmap(
cross_scale_data: Dict[str, Dict[int, pd.DataFrame]],
model_type: str,
save_path: str,
):
"""2D heatmap: x=layer, y=scale, color=similarity for each hypothesis pair.
Gives a bird's-eye view of how both network depth and training data scale
affect the similarity between hypothesis-relevant category pairs.
"""
pairs = [
('above', 'far', 'above-far'),
('under', 'close', 'under-close'),
('left', 'right', 'left-right'),
('above', 'under', 'above-under'),
('far', 'close', 'far-close'),
]
scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
available_scales = [s for s in scale_order if s in cross_scale_data]
# Determine layer range from first available scale
first_scale = available_scales[0]
all_layers = sorted(cross_scale_data[first_scale].keys())
fig, axes = plt.subplots(len(pairs), 1, figsize=(max(14, len(all_layers) * 0.5), 3 * len(pairs)))
if len(pairs) == 1:
axes = [axes]
for idx, (cat1, cat2, label) in enumerate(pairs):
ax = axes[idx]
# Build matrix: rows=scales, cols=layers
matrix = np.full((len(available_scales), len(all_layers)), np.nan)
for si, scale in enumerate(available_scales):
layer_sims = cross_scale_data[scale]
for li, layer in enumerate(all_layers):
if layer in layer_sims:
df = layer_sims[layer]
if cat1 in df.index and cat2 in df.columns:
matrix[si, li] = df.loc[cat1, cat2]
im = ax.imshow(matrix, aspect='auto', cmap='RdYlBu_r', vmin=0.5, vmax=1.0)
ax.set_yticks(range(len(available_scales)))
ax.set_yticklabels(available_scales, fontsize=10)
# X-axis: show every Nth layer label to avoid crowding
step = max(1, len(all_layers) // 15)
ax.set_xticks(range(0, len(all_layers), step))
ax.set_xticklabels([str(all_layers[i]) for i in range(0, len(all_layers), step)], fontsize=8)
ax.set_title(label, fontsize=12, fontweight='bold')
ax.set_xlabel('Layer Index', fontsize=10)
fig.colorbar(im, ax=ax, label='Cosine Similarity', shrink=0.8)
fig.suptitle(
f'{model_type.upper()} - Similarity Evolution (Layer x Scale)',
fontsize=15, fontweight='bold', y=1.01
)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved evolution heatmap: {save_path}")
# ============================================================================
# Model Configurations
# ============================================================================
MODEL_CONFIGS = {
'molmo': {
'vanilla': 'allenai/Molmo-7B-O-0924',
'80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared',
'400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared',
'800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared',
'2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared',
},
'nvila': {
'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B',
'80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221',
'400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221',
'800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221',
'2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632',
'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model',
},
'qwen': {
'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct',
'80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221',
'400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221',
'800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221',
'2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517',
},
}
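# The local checkpoint paths above are environment-specific; main() skips any
# scale whose path does not exist (hub IDs like 'Qwen/...' and 'allenai/...'
# are exempt from the existence check).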
# ============================================================================
# Main
# ============================================================================
def main():
parser = argparse.ArgumentParser(description='Experiment 2-A (Modified): Embedding Space Analysis')
parser.add_argument('--data_path', type=str,
default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
parser.add_argument('--model_type', type=str, required=True,
choices=['molmo', 'nvila', 'qwen'])
parser.add_argument('--scales', type=str, nargs='+',
default=['vanilla', '80k', '400k', '800k', '2m'])
parser.add_argument('--output_dir', type=str,
default='/data/shared/Qwen/experiments/exp2a_modified/results_all_layers')
parser.add_argument('--samples_per_category', type=int, default=200)
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--seed', type=int, default=42)
args = parser.parse_args()
# Auto-include roborefer for nvila if not already specified
if args.model_type == 'nvila' and 'roborefer' not in args.scales:
args.scales.append('roborefer')
# Set random seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
# Create output directory
output_dir = os.path.join(args.output_dir, args.model_type)
os.makedirs(output_dir, exist_ok=True)
# Load and modify data
logger.info("\n=== Loading & Modifying EmbSpatialBench Data ===")
data = load_and_modify_data(args.data_path, args.samples_per_category, args.seed)
results_list = []
cross_scale_data = {} # scale -> {layer_idx -> sim_df}
model_configs = MODEL_CONFIGS[args.model_type]
for scale in args.scales:
if scale not in model_configs:
logger.warning(f"Scale {scale} not available for {args.model_type}, skipping...")
continue
model_path = model_configs[scale]
if not os.path.exists(model_path) and not model_path.startswith('Qwen/') and not model_path.startswith('allenai/'):
logger.warning(f"Model path not found: {model_path}, skipping...")
continue
logger.info(f"\n=== Processing {args.model_type} - {scale} ===")
logger.info(f"Model path: {model_path}")
try:
extractor = get_extractor(
args.model_type,
model_path,
scale=scale,
device=args.device,
)
num_layers = len(extractor.target_layers)
# Extract representations for ALL layers in one pass
all_layer_reps = extract_all_layer_representations(extractor, data)
# Compute similarity matrices for all layers
scale_sims = {}
model_name = f"{args.model_type}_{scale}"
for layer_idx in sorted(all_layer_reps.keys()):
sim_df = compute_similarity_matrix(all_layer_reps[layer_idx])
scale_sims[layer_idx] = sim_df
results = analyze_hypothesis(sim_df, model_name)
results['layer_idx'] = layer_idx
results_list.append(results)
# Save CSV for every layer
sim_df.to_csv(os.path.join(output_dir, f'similarity_{scale}_L{layer_idx}.csv'))
cross_scale_data[scale] = scale_sims
logger.info(f" Computed similarity matrices for {len(scale_sims)} layers")
# Save heatmaps for representative layers only (to avoid hundreds of files)
rep_layers = get_representative_layers(sorted(scale_sims.keys()))
logger.info(f" Saving heatmaps for representative layers: {rep_layers}")
for layer_idx in rep_layers:
sim_df = scale_sims[layer_idx]
plot_similarity_heatmap(
sim_df,
f'{args.model_type.upper()} ({scale}) - Layer {layer_idx}/{num_layers-1}',
os.path.join(output_dir, f'heatmap_{scale}_L{layer_idx}.png')
)
# Per-scale trajectory plot
plot_similarity_trajectories(
scale_sims,
f'{args.model_type.upper()} ({scale})',
os.path.join(output_dir, f'trajectory_{scale}.png')
)
extractor.cleanup()
except Exception as e:
logger.error(f"Failed to process {args.model_type} - {scale}: {e}")
import traceback
traceback.print_exc()
continue
# Cross-scale comparison plots
if len(cross_scale_data) > 1:
plot_cross_scale_trajectories(
cross_scale_data,
args.model_type,
os.path.join(output_dir, 'trajectory_cross_scale.png')
)
plot_similarity_evolution_heatmap(
cross_scale_data,
args.model_type,
os.path.join(output_dir, 'evolution_heatmap.png')
)
# Save results summary
if results_list:
results_df = pd.DataFrame(results_list)
results_df.to_csv(os.path.join(output_dir, 'results_summary.csv'), index=False)
logger.info("\n=== Analysis Complete ===")
logger.info(f"Results saved to: {output_dir}")
logger.info(f"Total: {len(results_list)} (layer, scale) combinations across {len(cross_scale_data)} scales")
if __name__ == '__main__':
main()