| import io |
| import json |
| import random |
| import tarfile |
| from contextlib import contextmanager |
| import tempfile |
| import pickle |
| import base64 |
|
|
| try: |
| import h5py |
| import imageio |
| import requests |
| import zstandard as zstd |
| from datasets import load_dataset |
| from huggingface_hub import hf_hub_url, HfFolder |
| except ImportError: |
| print( |
| "Please setup your environment with e.g." |
| " `pip install h5py 'imageio[ffmpeg]' requests zstandard datasets huggingface_hub`" |
| ) |
| raise |
|
|
# Hugging Face token read from the local HF cache; may be None, in which
# case requests are sent unauthenticated (repo must then be public).
TOKEN = HfFolder.get_token()
# Dataset repository that hosts the shard files and package indexes.
REPO = "allenai/molmobot-data"
# Dataset config names, one per data-generation task; each maps to a
# subdirectory of shard files in the repo.
TASK_CONFIGS = [
    "DoorOpeningDataGenConfig",
    "FrankaPickAndPlaceColorOmniCamConfig",
    "FrankaPickAndPlaceNextToOmniCamConfig",
    "FrankaPickAndPlaceOmniCamConfig",
    "FrankaPickOmniCamConfig",
    "RBY1OpenDataGenConfig",
    "RBY1PickAndPlaceDataGenConfig",
    "RBY1PickDataGenConfig",
    "FrankaPickAndPlaceOmniCamConfig_ObjectBackfill",
]
# Dataset split whose shards/packages are streamed.
SPLIT = "train"
|
|
|
|
@contextmanager
def stream_pkg(
    entry: dict,
    config_name: str,
    buffer_size: int = 8192,
    repo_id: str = REPO,
    timeout: float = 60.0,
):
    """
    Streams a single compressed archive (tar.zst) from within a shard using
    an HTTP Range request. Each shard contains multiple archives packed
    contiguously; the entry's offset and size identify the byte range for
    one archive. This context manager exposes an open tarfile.

    Args:
        entry: Index row with at least "shard_id", "offset" and "size".
        config_name: Task config (repo subdirectory) the shard belongs to.
        buffer_size: Read-buffer size wrapped around the zstd stream.
        repo_id: Dataset repository on the Hub.
        timeout: Seconds for connect/read on the HTTP request.

    Yields:
        A tarfile.TarFile opened in streaming ("r|") mode.
    """
    url = hf_hub_url(
        repo_id=repo_id,
        # NOTE: the shard file is named .tar but holds concatenated
        # zstd-compressed archives — hence the decompressor below.
        filename=f"{config_name}/{SPLIT}_shards/{entry['shard_id']:05d}.tar",
        repo_type="dataset",
        revision="main",
    )

    # Inclusive byte range for exactly one packed archive.
    start = entry["offset"]
    end = start + entry["size"] - 1
    headers = {"Range": f"bytes={start}-{end}"}
    if TOKEN:
        headers["Authorization"] = f"Bearer {TOKEN}"

    # timeout bounds connect and per-read waits; without it a stalled
    # server would hang this generator indefinitely.
    with requests.get(url, headers=headers, stream=True, timeout=timeout) as response:
        response.raise_for_status()

        dctx = zstd.ZstdDecompressor()

        with dctx.stream_reader(response.raw) as reader:
            buffered = io.BufferedReader(reader, buffer_size=buffer_size)

            # "r|" = forward-only streaming mode: no seeking on the
            # decompressed stream is required.
            with tarfile.open(fileobj=buffered, mode="r|") as tar:
                yield tar
|
|
|
|
def collect_scene_data(entry: dict, config_name: str, keep_videos=True):
    """Collects scene identification info, and h5 and mp4 buffers from the archive."""

    def _scene_label(member_name):
        # Scene id = archive's top-level directory, prefixed with the part index.
        return f'part{entry["part"]}_{member_name.split("/")[0]}'

    def _parse_mp4(member_name):
        stem = member_name.split("/")[1].split(".")[0]
        traj_cam_part, batch_part = stem.split("_batch_")
        pieces = traj_cam_part.split("_")
        # pieces look like ["traj", "<idx>", <camera name tokens...>]
        return batch_part, int(pieces[1]), "_".join(pieces[2:])

    def _parse_h5(member_name):
        stem = member_name.split("/")[1].split(".")[0]
        return stem.split("_batch_")[1]

    scene_info = None
    packages = {}
    with stream_pkg(entry, config_name) as tar:
        # Streaming tar: members must be consumed in order, and each
        # extractfile() read happens while the stream is positioned there.
        for member in tar:
            name = member.name
            if name.endswith(".h5"):
                if scene_info is None:
                    scene_info = _scene_label(name)
                batch = _parse_h5(name)
                packages.setdefault(batch, {})["h5"] = tar.extractfile(member).read()
            elif name.endswith(".mp4") and keep_videos:
                batch, ep, cam = _parse_mp4(name)
                per_episode = packages.setdefault(batch, {}).setdefault(ep, {})
                per_episode[cam] = tar.extractfile(member).read()

    return scene_info, packages
|
|
|
|
def pop_frames(h5_and_ep_to_cams, eid):
    """Pops video buffers for an episode and decodes them into numpy frame arrays."""
    episode = h5_and_ep_to_cams[eid]
    decoded = {}
    # list() snapshots the keys since pop() mutates the dict while iterating.
    for cam_name in list(episode.keys()):
        raw_mp4 = episode.pop(cam_name)
        # imageio's ffmpeg backend wants a real file path, so spill the
        # in-memory mp4 buffer to a temporary file first.
        with tempfile.NamedTemporaryFile(suffix=".mp4") as tmp:
            tmp.write(raw_mp4)
            tmp.flush()
            tmp.seek(0)
            with imageio.get_reader(tmp.name, format="ffmpeg") as vid:
                decoded[cam_name] = list(vid)

    return decoded
|
|
|
|
def decode_datum(datum):
    """Decodes a null-padded JSON bytes array into a Python object."""
    # Fixed-width storage pads the JSON payload with trailing NULs.
    text = datum.tobytes().decode("utf-8")
    return json.loads(text.rstrip("\x00"))
|
|
|
|
class Config:
    """Generic stand-in used when unpickling references an unknown config class."""

    def __init__(self, *init_args, **init_kwargs):
        # Retain whatever the caller passed so it is visible in repr().
        self._args = init_args
        self._kwargs = init_kwargs

    def __setstate__(self, pickled_state):
        # NOTE(review): assumes the pickled state is a mapping holding the
        # instance dict under "__dict__" — TODO confirm against the producer.
        self.__dict__ = pickled_state["__dict__"]

    def __repr__(self):
        # str(dict) == repr(dict), so this mirrors the attribute dict verbatim.
        return str(self.__dict__)
|
|
|
|
class ConfigUnpickler(pickle.Unpickler):
    """Unpickler that resolves numpy/pathlib classes normally and stubs everything else."""

    # Module prefixes whose classes are imported for real instead of stubbed.
    _TRUSTED_PREFIXES = ("numpy", "pathlib")

    def find_class(self, module, name):
        if not module.startswith(self._TRUSTED_PREFIXES):
            # Anything untrusted resolves to the generic placeholder class.
            return Config
        import importlib

        return getattr(importlib.import_module(module), name)
|
|
|
|
def safe_load_config(encoded_frozen_config):
    """
    Deserializes a base64-encoded pickled config, replacing unknown
    classes with a generic Config placeholder. Returns None on failure.
    """
    try:
        # Decoding and unpickling both stay inside the try: a corrupt
        # base64 payload must also degrade to None, not raise.
        raw_pickle = base64.b64decode(encoded_frozen_config)
        return ConfigUnpickler(io.BytesIO(raw_pickle)).load()
    except Exception as e:
        # Best-effort loader: report and continue without a config.
        print(f"Warning: config pickle could not be fully loaded: {e}")
        return None
|
|
|
|
def iterate_data(entry: dict, config_name: str):
    """
    Yields per-step dicts (traj_info, step, action, camera frames)
    for all valid episodes in the given scene package.

    Args:
        entry: Package-index row identifying one archive within a shard.
        config_name: Task config (repo subdirectory) the entry belongs to.

    Yields:
        dict with "traj_info" (scene metadata incl. decoded config),
        "step" (frame index), "action" (decoded command), plus one key
        per camera mapping to that step's decoded frame.
    """
    scene_info, batch_to_h5_and_ep_to_cams = collect_scene_data(entry, config_name)

    # pop() releases each batch's buffers as soon as it is consumed.
    for batch in list(batch_to_h5_and_ep_to_cams.keys()):
        h5_and_ep_to_cams = batch_to_h5_and_ep_to_cams.pop(batch)
        if "h5" not in h5_and_ep_to_cams:
            # Videos without trajectory data cannot be interpreted; skip.
            continue

        h5 = h5_and_ep_to_cams["h5"]

        with h5py.File(io.BytesIO(h5), "r") as f:
            if "valid_traj_mask" in f.keys():
                valid_traj_mask = f["valid_traj_mask"][()]
            else:
                # Files without an explicit mask: reconstruct it from the
                # traj_<idx> groups that are actually present.
                traj_keys = {
                    int(key.split("traj_")[-1])
                    for key in f.keys()
                    if key.startswith("traj_")
                }
                # default=-1 keeps an h5 with no traj_* groups from raising
                # ValueError on max(); it simply yields an empty mask.
                valid_traj_mask = [
                    idx in traj_keys
                    for idx in range(max(traj_keys, default=-1) + 1)
                ]

            for eid, val in enumerate(valid_traj_mask):
                if not val:
                    # Trajectory was filtered out during generation.
                    continue

                traj = f[f"traj_{eid}"]

                obs_scene = json.loads(traj["obs_scene"][()].decode())
                # frozen_config is a base64 pickle; unknown classes become
                # Config placeholders (None on total failure).
                obs_scene["config"] = safe_load_config(obs_scene.pop("frozen_config"))
                obs_scene["scene_id"] = scene_info
                obs_scene["traj_id"] = f"{batch}_ep{eid}"

                actions = [
                    decode_datum(action)
                    for action in traj["actions"]["commanded_action"][()]
                ]

                frames = pop_frames(h5_and_ep_to_cams, eid)

                for fid, action in enumerate(actions):
                    yield {
                        "traj_info": obs_scene,
                        "step": fid,
                        "action": action,
                        **{cam: frames[cam][fid] for cam in frames},
                    }
|
|
|
|
def main():
    """Print per-config and total size stats, then spot-check one sample per part."""
    # Running totals accumulated across all task configs.
    grand_size = 0
    grand_inflated = 0
    grand_largest = 0
    grand_parts = 0

    for config in TASK_CONFIGS:
        # The "<split>_pkgs" split is an index: one row per packed archive.
        ds = load_dataset(REPO, name=config, split=f"{SPLIT}_pkgs")

        current_size = sum(row["size"] for row in ds)
        current_inflated = sum(row["inflated_size"] for row in ds)
        current_largest = max(row["inflated_size"] for row in ds)
        current_parts = len(set(row["part"] for row in ds))

        print(f"Task config {config}:")
        print(f"  Compressed: {current_size / 1024 ** 3:.2f} GiB")
        print(f"  Inflated: {current_inflated / 1024 ** 3:.2f} GiB")
        print(f"  Largest archive: {current_largest / 1024 ** 3:.2f} GiB")
        print(f"  Collection parts: {current_parts}")

        grand_size += current_size
        grand_inflated += current_inflated
        grand_largest = max(current_largest, grand_largest)
        grand_parts += current_parts

        # Bucket index rows by collection part.
        # NOTE(review): assumes "part" values are exactly 0..current_parts-1
        # (contiguous); otherwise this indexing raises — confirm upstream.
        parts = [[] for _ in range(current_parts)]
        for entry in ds:
            parts[entry["part"]].append(entry)
        for part in parts:
            # Sample one random scene package per part for a sanity check.
            random_scene_pkg = random.choice(part)
            for it, item in enumerate(iterate_data(random_scene_pkg, config)):
                # Inspect the second step (it == 1), then stop iterating.
                if it == 1:
                    info = item.pop("traj_info")
                    scene_id = info["scene_id"]
                    traj_id = info["traj_id"]
                    task_type = info["task_type"]
                    task_description = info["task_description"]
                    robot_name = info["config"].robot_config.name
                    step = item.pop("step")
                    action_keys = sorted(item.pop("action").keys())
                    # After popping metadata, only camera frames remain in item.
                    cam_shapes = {cam: frame.shape for cam, frame in item.items()}
                    print(
                        f"{step=} {traj_id=} {scene_id=}"
                        f"\n  {task_type=}"
                        f"\n  {task_description=}"
                        f"\n  {robot_name=}"
                        f"\n  {action_keys=}"
                        f"\n  {cam_shapes=}"
                    )
                    break

    print(f"TOTAL across {len(TASK_CONFIGS)} task configs:")
    print(f"  Compressed: {grand_size / 1024 ** 4:.2f} TiB")
    print(f"  Inflated: {grand_inflated / 1024 ** 4:.2f} TiB")
    print(f"  Largest archive: {grand_largest / 1024 ** 3:.2f} GiB")
    print(f"  Collection parts: {grand_parts}")
|
|
|
|
if __name__ == "__main__":
    # Script entry point: summarize dataset sizes and sample one package per part.
    main()
    print("DONE")
|
|