Commit ·
52496f8
0
Parent(s):
Squash history (QC grids added; final preview state)
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +60 -0
- LICENSE +42 -0
- README.md +172 -0
- README_preview.md +174 -0
- dataset_frames.json +0 -0
- load_tedwb1k.py +327 -0
- metadata/base/-2Dj9M71JAc.jpg +3 -0
- metadata/base/-E97Kgi0sR4.jpg +3 -0
- metadata/base/-FOCpMAww28.jpg +3 -0
- metadata/base/-H1tUMRJoeo-Scenes.jpg +3 -0
- metadata/base/-H1tUMRJoeo.jpg +3 -0
- metadata/base/-I3e6Mkfp7M.jpg +3 -0
- metadata/base/-MTRxRO5SRA.jpg +3 -0
- metadata/base/-RkhAP0_ms4.jpg +3 -0
- metadata/base/-Z-ul0GzzM4.jpg +3 -0
- metadata/base/-aouBn7IKIo.jpg +3 -0
- metadata/base/-eBUcBfkVCo.jpg +3 -0
- metadata/base/-hY9QSdaReY.jpg +3 -0
- metadata/base/-mhe7COLiR0.jpg +3 -0
- metadata/base/-moW9jvvMr4.jpg +3 -0
- metadata/base/-nKdufEaL8k.jpg +3 -0
- metadata/base/-vZXgApsPCQ.jpg +3 -0
- metadata/base/-vqV-gHa2FE.jpg +3 -0
- metadata/base/0-FkPxSc_M4.jpg +3 -0
- metadata/base/05jJodDVJRQ.jpg +3 -0
- metadata/base/08ZWROqoTZo.jpg +3 -0
- metadata/base/0DHywidLX6A.jpg +3 -0
- metadata/base/0FQXicAGy5U.jpg +3 -0
- metadata/base/0Fi83BHQsMA.jpg +3 -0
- metadata/base/0JGarsZE1rk.jpg +3 -0
- metadata/base/0K5OO2ybueM.jpg +3 -0
- metadata/base/0MD4Ymjyc2I.jpg +3 -0
- metadata/base/0NV1KdWRHck.jpg +3 -0
- metadata/base/0PAy1zBtTbw.jpg +3 -0
- metadata/base/0R9zjn9BBvA.jpg +3 -0
- metadata/base/0SkdP36wiAU.jpg +3 -0
- metadata/base/0bRocfcPhHU.jpg +3 -0
- metadata/base/0d6iSvF1UmA.jpg +3 -0
- metadata/base/0g0S34XE2b8.jpg +3 -0
- metadata/base/0g2WE1qXiKM.jpg +3 -0
- metadata/base/0gMCZFHv9v8.jpg +3 -0
- metadata/base/0gks6ceq4eQ.jpg +3 -0
- metadata/base/0iIh5YYDR2o.jpg +3 -0
- metadata/base/0txtVkBUdSQ.jpg +3 -0
- metadata/base/0ygtX2nyexo.jpg +3 -0
- metadata/base/13rqtiAPISY.jpg +3 -0
- metadata/base/16cM-RFid9U.jpg +3 -0
- metadata/base/16p9YRF0l-g.jpg +3 -0
- metadata/base/18zvlz5CxPE.jpg +3 -0
- metadata/base/1AT5klu_yAQ.jpg +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.avro filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
# Audio files - uncompressed
|
| 40 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
# Audio files - compressed
|
| 44 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
# Image files - uncompressed
|
| 50 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
# Image files - compressed
|
| 55 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 58 |
+
# Video files - compressed
|
| 59 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
LICENSE
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC-BY-NC-ND-4.0)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2026 neil
|
| 4 |
+
|
| 5 |
+
This dataset is licensed under the Creative Commons Attribution-NonCommercial-NoDerivatives
|
| 6 |
+
4.0 International License. To view a copy of this license, visit
|
| 7 |
+
|
| 8 |
+
https://creativecommons.org/licenses/by-nc-nd/4.0/
|
| 9 |
+
|
| 10 |
+
or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
|
| 11 |
+
|
| 12 |
+
You are free to:
|
| 13 |
+
- Share — copy and redistribute the material in any medium or format
|
| 14 |
+
|
| 15 |
+
Under the following terms:
|
| 16 |
+
- Attribution — You must give appropriate credit, provide a link to the
|
| 17 |
+
license, and indicate if changes were made. You may do so in any reasonable
|
| 18 |
+
manner, but not in any way that suggests the licensor endorses you or your use.
|
| 19 |
+
- NonCommercial — You may not use the material for commercial purposes.
|
| 20 |
+
- NoDerivatives — If you remix, transform, or build upon the material, you may
|
| 21 |
+
not distribute the modified material.
|
| 22 |
+
- No additional restrictions — You may not apply legal terms or technological
|
| 23 |
+
measures that legally restrict others from doing anything the license permits.
|
| 24 |
+
|
| 25 |
+
------------------------------------------------------------------------------
|
| 26 |
+
SOURCE-MATERIAL NOTICE
|
| 27 |
+
------------------------------------------------------------------------------
|
| 28 |
+
This dataset is derived from videos published on https://www.ted.com which are
|
| 29 |
+
themselves licensed under CC-BY-NC-ND 4.0 by TED Conferences, LLC. The tracking
|
| 30 |
+
parameters, JPG frames, and alpha mattes in this dataset are derivative works of
|
| 31 |
+
those videos. This dataset matches the upstream license (CC-BY-NC-ND 4.0) and
|
| 32 |
+
inherits the same restrictions on the source material:
|
| 33 |
+
|
| 34 |
+
- You may NOT use this dataset for commercial purposes.
|
| 35 |
+
- You MUST credit "TED" as the source of the original video content in any
|
| 36 |
+
publication, demo, or derivative work.
|
| 37 |
+
- You SHOULD link back to the original TED talk page for any individual
|
| 38 |
+
speaker whose data you use in a publication, demo, or derivative work.
|
| 39 |
+
- You may NOT redistribute modified or derivative versions of this dataset.
|
| 40 |
+
|
| 41 |
+
This notice does not constitute legal advice. If your intended use is unclear,
|
| 42 |
+
consult a lawyer or contact TED directly.
|
README.md
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: cc-by-nc-nd-4.0
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
pretty_name: TEDWB1k-preview
|
| 6 |
+
size_categories:
|
| 7 |
+
- 1K<n<10K
|
| 8 |
+
task_categories:
|
| 9 |
+
- other
|
| 10 |
+
tags:
|
| 11 |
+
- 3d-human
|
| 12 |
+
- smpl-x
|
| 13 |
+
- flame
|
| 14 |
+
- avatar
|
| 15 |
+
- gaussian-splatting
|
| 16 |
+
- video
|
| 17 |
+
- motion-capture
|
| 18 |
+
- ted
|
| 19 |
+
- preview
|
| 20 |
+
configs:
|
| 21 |
+
- config_name: subjects
|
| 22 |
+
data_files:
|
| 23 |
+
- split: train
|
| 24 |
+
path: metadata/subjects_train.parquet
|
| 25 |
+
- split: train_subset_x1
|
| 26 |
+
path: metadata/subjects_train_subset_x1.parquet
|
| 27 |
+
- split: train_subset_x12
|
| 28 |
+
path: metadata/subjects_train_subset_x12.parquet
|
| 29 |
+
- split: train_val
|
| 30 |
+
path: metadata/subjects_train_val.parquet
|
| 31 |
+
- split: test
|
| 32 |
+
path: metadata/subjects_test.parquet
|
| 33 |
+
---
|
| 34 |
+
|
| 35 |
+
# TEDWB1k-preview
|
| 36 |
+
|
| 37 |
+
> ⚠️ **This is the public preview of [`initialneil/TEDWB1k`](https://huggingface.co/datasets/initialneil/TEDWB1k).**
|
| 38 |
+
> The full dataset (1,431 TED-talk speakers, ~120 GB) is hosted at the gated repo above.
|
| 39 |
+
> This preview repo exists so the HuggingFace **Dataset Viewer** can render the
|
| 40 |
+
> per-subject thumbnails and tracking grids without going through the gated EULA flow.
|
| 41 |
+
>
|
| 42 |
+
> If you want the **full training data** (frames + mattes + per-frame SMPL-X / FLAME
|
| 43 |
+
> tracking for all 1,431 subjects), go to the gated main repo, accept the agreement,
|
| 44 |
+
> and use [`load_tedwb1k.py`](https://huggingface.co/datasets/initialneil/TEDWB1k/blob/main/load_tedwb1k.py).
|
| 45 |
+
|
| 46 |
+
## What's in this preview
|
| 47 |
+
|
| 48 |
+
This repo is **schema-identical** to the gated main repo, but the heavy
|
| 49 |
+
per-subject data (`frames.tar`, `mattes.tar`, tracking pickles) is included
|
| 50 |
+
**only for the 12 subjects in `train_subset_x12`** as a working sample. For
|
| 51 |
+
the other 1,419 subjects, only the metadata and QC visualizations are
|
| 52 |
+
present (so the viewer table still lists all 1,431 entries).
|
| 53 |
+
|
| 54 |
+
| What | Subjects | Size |
|
| 55 |
+
|---|---:|---:|
|
| 56 |
+
| Per-split parquets with embedded source-frame thumbnails | 1,431 | ~210 MB |
|
| 57 |
+
| `metadata/previews/<id>.jpg` (1024×1024 source frames) | 1,431 | ~210 MB |
|
| 58 |
+
| `metadata/ehm/<id>.jpg` (full-res SMPL-X overlay grids) | 1,431 | ~17.6 GB |
|
| 59 |
+
| `metadata/flame/<id>.jpg` (full-res FLAME overlay grids) | 1,431 | ~8.0 GB |
|
| 60 |
+
| `metadata/base/<id>.jpg` (full-res PIXIE+Sapiens grids) | 1,431 | ~5.2 GB |
|
| 61 |
+
| **Per-subject heavy data** (frames.tar + mattes.tar + tracking) | **12** | ~540 MB |
|
| 62 |
+
| **Total preview** | | **~32 GB** |
|
| 63 |
+
|
| 64 |
+
The HF Dataset Viewer above renders 5 tabs (`train`, `train_subset_x1`,
|
| 65 |
+
`train_subset_x12`, `train_val`, `test`) with one row per subject, the
|
| 66 |
+
per-subject frame and shot counts, and a thumbnail of the first source
|
| 67 |
+
frame. Each thumbnail is the actual `shots_images/<id>/<first_shot>/000000.jpg`
|
| 68 |
+
that the tracker consumed.
|
| 69 |
+
|
| 70 |
+
## Quick start
|
| 71 |
+
|
| 72 |
+
If you only want to play with one of the 12 sample subjects (no agreement
|
| 73 |
+
required):
|
| 74 |
+
|
| 75 |
+
```bash
|
| 76 |
+
pip install huggingface_hub
|
| 77 |
+
|
| 78 |
+
python -c "
|
| 79 |
+
from huggingface_hub import snapshot_download
|
| 80 |
+
snapshot_download(
|
| 81 |
+
'initialneil/TEDWB1k-preview',
|
| 82 |
+
repo_type='dataset',
|
| 83 |
+
allow_patterns='subjects/-2Dj9M71JAc/*',
|
| 84 |
+
local_dir='./tedwb1k_x1',
|
| 85 |
+
)
|
| 86 |
+
"
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
That gives you tracking pickles + `frames.tar` + `mattes.tar` for one
|
| 90 |
+
sample subject in a few seconds. To turn it into the 5-file bundle that
|
| 91 |
+
[HolisticAvatar](https://github.com/initialneil/HolisticAvatar)'s
|
| 92 |
+
`TrackedData` expects, use the same `load_tedwb1k.py` from the main repo:
|
| 93 |
+
|
| 94 |
+
```bash
|
| 95 |
+
wget https://huggingface.co/datasets/initialneil/TEDWB1k/raw/main/load_tedwb1k.py
|
| 96 |
+
python load_tedwb1k.py --split train_subset_x1 --out ./tedwb1k_x1 \
|
| 97 |
+
--repo_id initialneil/TEDWB1k-preview
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
For the **full 1,361-subject training set**, request access at the
|
| 101 |
+
[gated main repo](https://huggingface.co/datasets/initialneil/TEDWB1k).
|
| 102 |
+
|
| 103 |
+
## Per-subject visualizations
|
| 104 |
+
|
| 105 |
+
Each of the 1,431 subjects has 4 standalone visualization files under
|
| 106 |
+
`metadata/`:
|
| 107 |
+
|
| 108 |
+
- `metadata/previews/<id>.jpg` — clean 1024×1024 source frame (the first
|
| 109 |
+
frame of the first shot). Embedded in the parquet preview column too.
|
| 110 |
+
- `metadata/ehm/<id>.jpg` — full-resolution SMPL-X overlay grid from the
|
| 111 |
+
final tracking stage (large vertical contact sheet, ~13 MB).
|
| 112 |
+
- `metadata/flame/<id>.jpg` — FLAME overlay grid from the intermediate
|
| 113 |
+
face-fitting stage (~6 MB).
|
| 114 |
+
- `metadata/base/<id>.jpg` — stage-1 PIXIE+Sapiens overlay grid (~4 MB).
|
| 115 |
+
|
| 116 |
+
You can fetch any single subject's visualization with one
|
| 117 |
+
`hf_hub_download` call:
|
| 118 |
+
|
| 119 |
+
```python
|
| 120 |
+
from huggingface_hub import hf_hub_download
|
| 121 |
+
path = hf_hub_download(
|
| 122 |
+
'initialneil/TEDWB1k-preview',
|
| 123 |
+
'metadata/ehm/05jJodDVJRQ.jpg',
|
| 124 |
+
repo_type='dataset',
|
| 125 |
+
)
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
## Splits
|
| 129 |
+
|
| 130 |
+
Same as the main repo:
|
| 131 |
+
|
| 132 |
+
| Split | Subjects | Notes |
|
| 133 |
+
|---|---:|---|
|
| 134 |
+
| `train_subset_x1` | 1 | tiny single-subject overfit (⊂ `train`) |
|
| 135 |
+
| `train_subset_x12` | 12 | small overfit (⊂ `train`) — **the only subjects with downloadable heavy data in this preview** |
|
| 136 |
+
| `train_val` | 20 | training monitor (⊂ `train`) |
|
| 137 |
+
| `test` | 70 | identity-disjoint evaluation |
|
| 138 |
+
| `train` | 1,361 | full training pool |
|
| 139 |
+
| **total** | **1,431** | |
|
| 140 |
+
|
| 141 |
+
`train` (1,361) and `test` (70) are identity-disjoint and together cover
|
| 142 |
+
all 1,431 subjects. `train_subset_x1`, `train_subset_x12`, and `train_val`
|
| 143 |
+
are subsets of `train`.
|
| 144 |
+
|
| 145 |
+
## Why two repos?
|
| 146 |
+
|
| 147 |
+
HuggingFace's Dataset Viewer cannot render tabs/thumbnails for **gated**
|
| 148 |
+
datasets — the worker that computes split names runs without a user
|
| 149 |
+
identity and can't satisfy the gating EULA. The full TEDWB1k is gated for
|
| 150 |
+
TED-content compliance, so to keep the viewer working we mirror the
|
| 151 |
+
metadata + a 12-subject sample to this public preview repo.
|
| 152 |
+
|
| 153 |
+
For full discussion see this thread:
|
| 154 |
+
<https://discuss.huggingface.co/t/after-gated-user-access-was-enabled-the-huggingface-not-showing-dataset-viewer/157333>
|
| 155 |
+
|
| 156 |
+
## License
|
| 157 |
+
|
| 158 |
+
[**CC-BY-NC-ND 4.0**](https://creativecommons.org/licenses/by-nc-nd/4.0/) —
|
| 159 |
+
same as the main repo. Non-commercial research use only. Attribution
|
| 160 |
+
required. **No derivatives** — you may not distribute modified or remixed
|
| 161 |
+
versions of this dataset.
|
| 162 |
+
|
| 163 |
+
The tracking parameters, JPG frames, and mattes are all derived works of
|
| 164 |
+
TED talk videos that are themselves CC-BY-NC-ND on ted.com. This dataset
|
| 165 |
+
matches the upstream license to remain compatible with TED's source
|
| 166 |
+
restrictions.
|
| 167 |
+
|
| 168 |
+
## Links
|
| 169 |
+
|
| 170 |
+
- **Full gated dataset**: <https://huggingface.co/datasets/initialneil/TEDWB1k>
|
| 171 |
+
- Tracking pipeline: <https://github.com/initialneil/HolisticTracker>
|
| 172 |
+
- HolisticAvatar (downstream model): <https://github.com/initialneil/HolisticAvatar>
|
README_preview.md
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: cc-by-nc-nd-4.0
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
pretty_name: TEDWB1k-preview
|
| 6 |
+
size_categories:
|
| 7 |
+
- 1K<n<10K
|
| 8 |
+
task_categories:
|
| 9 |
+
- other
|
| 10 |
+
tags:
|
| 11 |
+
- 3d-human
|
| 12 |
+
- smpl-x
|
| 13 |
+
- flame
|
| 14 |
+
- avatar
|
| 15 |
+
- gaussian-splatting
|
| 16 |
+
- video
|
| 17 |
+
- motion-capture
|
| 18 |
+
- ted
|
| 19 |
+
- preview
|
| 20 |
+
configs:
|
| 21 |
+
- config_name: subjects
|
| 22 |
+
data_files:
|
| 23 |
+
- split: train
|
| 24 |
+
path: metadata/subjects_train.parquet
|
| 25 |
+
- split: train_subset_x1
|
| 26 |
+
path: metadata/subjects_train_subset_x1.parquet
|
| 27 |
+
- split: train_subset_x12
|
| 28 |
+
path: metadata/subjects_train_subset_x12.parquet
|
| 29 |
+
- split: train_val
|
| 30 |
+
path: metadata/subjects_train_val.parquet
|
| 31 |
+
- split: test
|
| 32 |
+
path: metadata/subjects_test.parquet
|
| 33 |
+
---
|
| 34 |
+
|
| 35 |
+
# TEDWB1k-preview
|
| 36 |
+
|
| 37 |
+
> ⚠️ **This is the public preview of [`initialneil/TEDWB1k`](https://huggingface.co/datasets/initialneil/TEDWB1k).**
|
| 38 |
+
> The full dataset (1,431 TED-talk speakers, ~120 GB) is hosted at the gated repo above.
|
| 39 |
+
> This preview repo exists so the HuggingFace **Dataset Viewer** can render the
|
| 40 |
+
> per-subject thumbnails and tracking grids without going through the gated EULA flow.
|
| 41 |
+
>
|
| 42 |
+
> If you want the **full training data** (frames + mattes + per-frame SMPL-X / FLAME
|
| 43 |
+
> tracking for all 1,431 subjects), go to the gated main repo, accept the agreement,
|
| 44 |
+
> and use [`load_tedwb1k.py`](https://huggingface.co/datasets/initialneil/TEDWB1k/blob/main/load_tedwb1k.py).
|
| 45 |
+
|
| 46 |
+
## What's in this preview
|
| 47 |
+
|
| 48 |
+
This repo is **schema-identical** to the gated main repo, but it's a lightweight
|
| 49 |
+
**catalog + sample** rather than a full mirror:
|
| 50 |
+
|
| 51 |
+
| What | Subjects | Size |
|
| 52 |
+
|---|---:|---:|
|
| 53 |
+
| Per-split parquets with embedded source-frame thumbnails | 1,431 | ~210 MB |
|
| 54 |
+
| `metadata/previews/<id>.jpg` (1024×1024 source frames) | 1,431 | ~210 MB |
|
| 55 |
+
| **Per-subject heavy data** (frames.tar + mattes.tar + tracking) | **12** | ~540 MB |
|
| 56 |
+
| **Total preview** | | **~1 GB** |
|
| 57 |
+
|
| 58 |
+
The HF Dataset Viewer above renders 5 tabs (`train`, `train_subset_x1`,
|
| 59 |
+
`train_subset_x12`, `train_val`, `test`) with one row per subject, the
|
| 60 |
+
per-subject frame and shot counts, and a thumbnail of the first source
|
| 61 |
+
frame (the actual `shots_images/<id>/<first_shot>/000000.jpg` that the
|
| 62 |
+
tracker consumed).
|
| 63 |
+
|
| 64 |
+
For full-resolution per-subject tracking visualizations
|
| 65 |
+
(`metadata/ehm/<id>.jpg`, `metadata/flame/<id>.jpg`, `metadata/base/<id>.jpg`),
|
| 66 |
+
go to the gated main repo — they're fetchable per-subject without downloading
|
| 67 |
+
the heavy `frames.tar`/`mattes.tar`.
|
| 68 |
+
|
| 69 |
+
## Quick start
|
| 70 |
+
|
| 71 |
+
If you only want to play with one of the 12 sample subjects (no agreement
|
| 72 |
+
required):
|
| 73 |
+
|
| 74 |
+
```bash
|
| 75 |
+
pip install huggingface_hub
|
| 76 |
+
|
| 77 |
+
python -c "
|
| 78 |
+
from huggingface_hub import snapshot_download
|
| 79 |
+
snapshot_download(
|
| 80 |
+
'initialneil/TEDWB1k-preview',
|
| 81 |
+
repo_type='dataset',
|
| 82 |
+
allow_patterns='subjects/-2Dj9M71JAc/*',
|
| 83 |
+
local_dir='./tedwb1k_x1',
|
| 84 |
+
)
|
| 85 |
+
"
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
That gives you tracking pickles + `frames.tar` + `mattes.tar` for one
|
| 89 |
+
sample subject in a few seconds. To turn it into the 5-file bundle that
|
| 90 |
+
[HolisticAvatar](https://github.com/initialneil/HolisticAvatar)'s
|
| 91 |
+
`TrackedData` expects, use the same `load_tedwb1k.py` from the main repo:
|
| 92 |
+
|
| 93 |
+
```bash
|
| 94 |
+
wget https://huggingface.co/datasets/initialneil/TEDWB1k/raw/main/load_tedwb1k.py
|
| 95 |
+
python load_tedwb1k.py --split train_subset_x1 --out ./tedwb1k_x1 \
|
| 96 |
+
--repo_id initialneil/TEDWB1k-preview
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
For the **full 1,361-subject training set**, request access at the
|
| 100 |
+
[gated main repo](https://huggingface.co/datasets/initialneil/TEDWB1k).
|
| 101 |
+
|
| 102 |
+
## Per-subject visualizations
|
| 103 |
+
|
| 104 |
+
In this preview, each of the 1,431 subjects has **one** standalone
|
| 105 |
+
visualization file:
|
| 106 |
+
|
| 107 |
+
- `metadata/previews/<id>.jpg` — clean 1024×1024 source frame (the first
|
| 108 |
+
frame of the first shot). Also embedded in the parquet `preview` column
|
| 109 |
+
so the HF Dataset Viewer renders it inline.
|
| 110 |
+
|
| 111 |
+
For full-resolution **SMPL-X / FLAME / PIXIE+Sapiens overlay grids**, head
|
| 112 |
+
to the gated main repo where each subject also has:
|
| 113 |
+
|
| 114 |
+
- `metadata/ehm/<id>.jpg` — final SMPL-X overlay grid (~13 MB)
|
| 115 |
+
- `metadata/flame/<id>.jpg` — intermediate FLAME overlay grid (~6 MB)
|
| 116 |
+
- `metadata/base/<id>.jpg` — stage-1 PIXIE+Sapiens overlay grid (~4 MB)
|
| 117 |
+
|
| 118 |
+
You can fetch any single subject's full-res visualization from the main
|
| 119 |
+
repo (after accepting the gating EULA) with one `hf_hub_download` call:
|
| 120 |
+
|
| 121 |
+
```python
|
| 122 |
+
from huggingface_hub import hf_hub_download
|
| 123 |
+
path = hf_hub_download(
|
| 124 |
+
'initialneil/TEDWB1k',
|
| 125 |
+
'metadata/ehm/05jJodDVJRQ.jpg',
|
| 126 |
+
repo_type='dataset',
|
| 127 |
+
)
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
## Splits
|
| 131 |
+
|
| 132 |
+
Same as the main repo:
|
| 133 |
+
|
| 134 |
+
| Split | Subjects | Notes |
|
| 135 |
+
|---|---:|---|
|
| 136 |
+
| `train_subset_x1` | 1 | tiny single-subject overfit (⊂ `train`) |
|
| 137 |
+
| `train_subset_x12` | 12 | small overfit (⊂ `train`) — **the only subjects with downloadable heavy data in this preview** |
|
| 138 |
+
| `train_val` | 20 | training monitor (⊂ `train`) |
|
| 139 |
+
| `test` | 70 | identity-disjoint evaluation |
|
| 140 |
+
| `train` | 1,361 | full training pool |
|
| 141 |
+
| **total** | **1,431** | |
|
| 142 |
+
|
| 143 |
+
`train` (1,361) and `test` (70) are identity-disjoint and together cover
|
| 144 |
+
all 1,431 subjects. `train_subset_x1`, `train_subset_x12`, and `train_val`
|
| 145 |
+
are subsets of `train`.
|
| 146 |
+
|
| 147 |
+
## Why two repos?
|
| 148 |
+
|
| 149 |
+
HuggingFace's Dataset Viewer cannot render tabs/thumbnails for **gated**
|
| 150 |
+
datasets — the worker that computes split names runs without a user
|
| 151 |
+
identity and can't satisfy the gating EULA. The full TEDWB1k is gated for
|
| 152 |
+
TED-content compliance, so to keep the viewer working we mirror the
|
| 153 |
+
metadata + a 12-subject sample to this public preview repo.
|
| 154 |
+
|
| 155 |
+
For full discussion see this thread:
|
| 156 |
+
<https://discuss.huggingface.co/t/after-gated-user-access-was-enabled-the-huggingface-not-showing-dataset-viewer/157333>
|
| 157 |
+
|
| 158 |
+
## License
|
| 159 |
+
|
| 160 |
+
[**CC-BY-NC-ND 4.0**](https://creativecommons.org/licenses/by-nc-nd/4.0/) —
|
| 161 |
+
same as the main repo. Non-commercial research use only. Attribution
|
| 162 |
+
required. **No derivatives** — you may not distribute modified or remixed
|
| 163 |
+
versions of this dataset.
|
| 164 |
+
|
| 165 |
+
The tracking parameters, JPG frames, and mattes are all derived works of
|
| 166 |
+
TED talk videos that are themselves CC-BY-NC-ND on ted.com. This dataset
|
| 167 |
+
matches the upstream license to remain compatible with TED's source
|
| 168 |
+
restrictions.
|
| 169 |
+
|
| 170 |
+
## Links
|
| 171 |
+
|
| 172 |
+
- **Full gated dataset**: <https://huggingface.co/datasets/initialneil/TEDWB1k>
|
| 173 |
+
- Tracking pipeline: <https://github.com/initialneil/HolisticTracker>
|
| 174 |
+
- HolisticAvatar (downstream model): <https://github.com/initialneil/HolisticAvatar>
|
dataset_frames.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
load_tedwb1k.py
ADDED
|
@@ -0,0 +1,327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
"""Download and assemble a TEDWB1k split for HolisticAvatar.
|
| 3 |
+
|
| 4 |
+
Usage examples
|
| 5 |
+
--------------
|
| 6 |
+
# Smallest possible test (1 subject, ~80 MB):
|
| 7 |
+
python load_tedwb1k.py --split train_subset_x1 --out ~/data/tedwb1k_x1
|
| 8 |
+
|
| 9 |
+
# 12-subject overfit set (~1 GB):
|
| 10 |
+
python load_tedwb1k.py --split train_subset_x12 --out ~/data/tedwb1k_x12
|
| 11 |
+
|
| 12 |
+
# 20-subject training-monitor set (~2 GB, subset of train):
|
| 13 |
+
python load_tedwb1k.py --split train_val --out ~/data/tedwb1k_train_val
|
| 14 |
+
|
| 15 |
+
# 70-subject test set (~10 GB):
|
| 16 |
+
python load_tedwb1k.py --split test --out ~/data/tedwb1k_test
|
| 17 |
+
|
| 18 |
+
# Full training pool (1361 subjects, ~190 GB):
|
| 19 |
+
python load_tedwb1k.py --split train --out ~/data/tedwb1k_train
|
| 20 |
+
|
| 21 |
+
# Use already-downloaded HF cache, skip re-download:
|
| 22 |
+
python load_tedwb1k.py --split test --out ~/data/tedwb1k_test --hf_cache ~/.cache/huggingface
|
| 23 |
+
|
| 24 |
+
After it finishes, point your training config at --out:
|
| 25 |
+
DATASET.data_path: <out>
|
| 26 |
+
|
| 27 |
+
The directory `<out>` will contain the same five files HolisticAvatar's
|
| 28 |
+
`TrackedData.__init__` expects:
|
| 29 |
+
optim_tracking_ehm.pkl # merged from per-subject pkls
|
| 30 |
+
id_share_params.pkl # merged from per-subject pkls
|
| 31 |
+
videos_info.json # merged from per-subject jsons
|
| 32 |
+
dataset_frames.json # copied from the release root
|
| 33 |
+
extra_info.json # generated locally with absolute frames_root/matte_root
|
| 34 |
+
|
| 35 |
+
…plus `frames_root/<vid>/...` and `matte_root/<vid>/...` containing the per-shot
|
| 36 |
+
JPGs that the dataloader reads at training time.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
from __future__ import annotations
|
| 40 |
+
|
| 41 |
+
import argparse
|
| 42 |
+
import json
|
| 43 |
+
import os
|
| 44 |
+
import pickle
|
| 45 |
+
import sys
|
| 46 |
+
import time
|
| 47 |
+
from pathlib import Path
|
| 48 |
+
|
| 49 |
+
REPO_ID = "initialneil/TEDWB1k"
|
| 50 |
+
REPO_TYPE = "dataset"
|
| 51 |
+
|
| 52 |
+
SPLIT_FILES = {
|
| 53 |
+
"train": "train.txt",
|
| 54 |
+
"train_subset_x1": "train_subset_x1.txt",
|
| 55 |
+
"train_subset_x12": "train_subset_x12.txt",
|
| 56 |
+
"train_val": "train_val.txt",
|
| 57 |
+
"test": "test.txt",
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
# Per-subject files we always need to feed TrackedData:
|
| 61 |
+
PER_SUBJECT_TRACKING = [
|
| 62 |
+
"tracking/optim_tracking_ehm.pkl",
|
| 63 |
+
"tracking/id_share_params.pkl",
|
| 64 |
+
"tracking/videos_info.json",
|
| 65 |
+
]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def parse_args() -> argparse.Namespace:
    """Build and parse the command-line interface for this script."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add = parser.add_argument
    add("--split", required=True, choices=list(SPLIT_FILES.keys()),
        help="Which subject set to download.")
    add("--out", required=True, type=Path,
        help="Local directory to assemble the dataset into.")
    add("--repo_id", default=REPO_ID,
        help=f"HuggingFace dataset repo id (default: {REPO_ID}).")
    add("--hf_cache", type=Path, default=None,
        help="Override HuggingFace cache dir (default: ~/.cache/huggingface).")
    add("--keep_tars", action="store_true",
        help="Keep frames.tar / mattes.tar after extraction (default: delete to save space).")
    add("--skip_download", action="store_true",
        help="Skip download step (assume HF cache is already populated).")
    add("--skip_extract", action="store_true",
        help="Skip frames/mattes extraction (just merge tracking pkls).")
    add("--local_snapshot", type=Path, default=None,
        help="Skip HF download entirely; treat this local dir as the snapshot. "
             "Useful for testing build_release.py output before upload, or if "
             "the user already has a clone of the repo.")
    return parser.parse_args()
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def read_subject_ids(
    split_name: str,
    repo_id: str,
    hf_cache: Path | None,
    local_snapshot: Path | None,
) -> list[str]:
    """Fetch and parse the split txt for the chosen split."""
    txt_name = SPLIT_FILES[split_name]
    if local_snapshot is None:
        # No local copy given: pull the single txt file from the hub.
        from huggingface_hub import hf_hub_download
        print(f"[1/5] Fetching split file {txt_name} from {repo_id} ...")
        split_txt = Path(hf_hub_download(
            repo_id=repo_id,
            filename=txt_name,
            repo_type=REPO_TYPE,
            cache_dir=str(hf_cache) if hf_cache else None,
        ))
    else:
        split_txt = local_snapshot / txt_name
        if not split_txt.exists():
            raise FileNotFoundError(f"{split_txt} not found in local snapshot")
        print(f"[1/5] Reading split file {txt_name} from local snapshot ...")
    # One subject id per non-blank line.
    ids = [line.strip() for line in split_txt.read_text().splitlines() if line.strip()]
    print(f" {len(ids)} subject ids in '{split_name}'")
    return ids
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def download_subject_files(
    repo_id: str,
    hf_cache: Path | None,
    subject_ids: list[str],
) -> Path:
    """Snapshot only the subject files we need. Returns the snapshot root."""
    from huggingface_hub import snapshot_download

    per_subject = [*PER_SUBJECT_TRACKING, "frames.tar", "mattes.tar"]
    patterns = [f"subjects/{vid}/{rel}" for vid in subject_ids for rel in per_subject]
    # Always grab dataset_frames.json (used for train/valid frame split inside TrackedData)
    patterns.append("dataset_frames.json")

    print(f"[2/5] snapshot_download from {repo_id} ({len(patterns)} patterns) ...")
    snap = snapshot_download(
        repo_id=repo_id,
        repo_type=REPO_TYPE,
        allow_patterns=patterns,
        cache_dir=str(hf_cache) if hf_cache else None,
    )
    print(f" snapshot at: {snap}")
    return Path(snap)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def merge_tracking(
    snapshot: Path,
    subject_ids: list[str],
    out: Path,
) -> None:
    """Merge per-subject tracking files into the 5-file TrackedData bundle.

    Per-subject `optim_tracking_ehm.pkl` and `id_share_params.pkl` are FLAT
    (no top-level video_id key) — the merger wraps them under each video_id
    so the result matches the format produced by `merge_ehmx_dataset.py`.
    """
    print(f"[3/5] Merging tracking files for {len(subject_ids)} subjects ...")

    def _unpickle(path: Path):
        # Small helper so each pkl read is a one-liner below.
        with open(path, "rb") as fh:
            return pickle.load(fh)

    optim_by_vid: dict = {}
    id_share_by_vid: dict = {}
    videos_info_all: dict = {}
    frame_count = 0
    missing_subjects: list[str] = []

    started = time.time()
    total = len(subject_ids)
    for idx, vid in enumerate(subject_ids, 1):
        track_dir = snapshot / "subjects" / vid / "tracking"
        optim_path = track_dir / "optim_tracking_ehm.pkl"
        share_path = track_dir / "id_share_params.pkl"
        info_path = track_dir / "videos_info.json"
        if not (optim_path.exists() and share_path.exists() and info_path.exists()):
            missing_subjects.append(vid)
            continue
        # Wrap the flat per-subject dicts under the video id.
        optim_by_vid[vid] = _unpickle(optim_path)
        id_share_by_vid[vid] = _unpickle(share_path)
        videos_info_all.update(json.loads(info_path.read_text()))
        frame_count += len(optim_by_vid[vid])
        if idx % 50 == 0 or idx == total:
            elapsed = time.time() - started
            print(f" merged {idx}/{total} subjects "
                  f"({frame_count} frames so far, {elapsed:.1f}s)")

    if missing_subjects:
        print(f" WARNING: {len(missing_subjects)} subjects had missing tracking files: {missing_subjects[:5]}...", file=sys.stderr)

    out.mkdir(parents=True, exist_ok=True)
    with open(out / "optim_tracking_ehm.pkl", "wb") as fh:
        pickle.dump(optim_by_vid, fh, protocol=pickle.HIGHEST_PROTOCOL)
    with open(out / "id_share_params.pkl", "wb") as fh:
        pickle.dump(id_share_by_vid, fh, protocol=pickle.HIGHEST_PROTOCOL)
    with open(out / "videos_info.json", "w") as fh:
        json.dump(videos_info_all, fh)
    print(f" wrote optim_tracking_ehm.pkl ({frame_count} frames)")
    print(f" wrote id_share_params.pkl ({len(id_share_by_vid)} subjects)")
    print(f" wrote videos_info.json ({len(videos_info_all)} subjects)")

    # Copy dataset_frames.json from snapshot (used by train/valid splits inside TrackedData)
    src_frames = snapshot / "dataset_frames.json"
    if src_frames.exists():
        (out / "dataset_frames.json").write_text(src_frames.read_text())
        print(f" copied dataset_frames.json")
    else:
        print(" WARNING: dataset_frames.json missing in snapshot — train/valid splits won't work")
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def setup_frame_dirs(
    snapshot: Path,
    subject_ids: list[str],
    out: Path,
    keep_tars: bool,
) -> tuple[Path, Path]:
    """Materialize per-subject frames + mattes under out/frames_root, out/matte_root.

    Handles both layouts:
      - Snapshot has `subjects/<vid>/frames.tar` (HF upload case): extract into
        out/frames_root/<vid>/ and (optionally) delete the tar to save disk.
      - Snapshot has `subjects/<vid>/frames/` as a real dir or symlink (local
        build_release.py output, or pre-extracted clone): symlink it from
        out/frames_root/<vid> -> resolved frames dir.

    Returns (frames_root, matte_root).
    """
    import tarfile

    frames_root = out / "frames_root"
    matte_root = out / "matte_root"
    frames_root.mkdir(parents=True, exist_ok=True)
    matte_root.mkdir(parents=True, exist_ok=True)

    print(f"[4/5] Setting up frames + mattes for {len(subject_ids)} subjects ...")
    n_extracted = n_linked = n_missing = 0
    for vid in subject_ids:
        sub = snapshot / "subjects" / vid
        for kind, dest_root in [("frames", frames_root), ("mattes", matte_root)]:
            tar_path = sub / f"{kind}.tar"
            dir_path = sub / kind
            target = dest_root / vid
            if target.exists() or target.is_symlink():
                continue  # idempotent
            if tar_path.exists():
                target.mkdir(parents=True, exist_ok=True)
                with tarfile.open(tar_path, "r") as tar:
                    try:
                        # These tars come from a remote repo: refuse absolute
                        # paths, "..", device nodes etc. (tar path traversal).
                        tar.extractall(path=target, filter="data")
                    except TypeError:
                        # Interpreter predates the `filter` kwarg backport.
                        tar.extractall(path=target)
                if not keep_tars:
                    # Delete the tar from the snapshot/cache to reclaim space.
                    tar_path.unlink()
                n_extracted += 1
            elif dir_path.exists():
                # Resolve through any symlinks so the link in out/ is stable.
                target.symlink_to(dir_path.resolve())
                n_linked += 1
            else:
                print(f" WARNING: {vid}/{kind} not in snapshot (no .tar, no dir)", file=sys.stderr)
                n_missing += 1
    # Each subject contributes one frames entry + one mattes entry, hence // 2.
    print(f" extracted={n_extracted // 2} linked={n_linked // 2} missing={n_missing}")
    return frames_root, matte_root
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def write_extra_info(out: Path, frames_root: Path, matte_root: Path) -> None:
    """Write extra_info.json with absolute paths to the local extracted dirs."""
    print("[5/5] Writing extra_info.json ...")
    payload = {
        "frames_root": str(frames_root.resolve()),
        "matte_root": str(matte_root.resolve()),
        "pshuman_root": None,
    }
    (out / "extra_info.json").write_text(json.dumps(payload, indent=2))
    print(f" frames_root = {payload['frames_root']}")
    print(f" matte_root = {payload['matte_root']}")
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def main() -> int:
    """Drive the pipeline: split list -> snapshot -> merge -> frames -> extra_info.

    Returns a process exit code (0 on success, 2 if huggingface_hub is missing).
    """
    opts = parse_args()
    out_dir = opts.out.expanduser().resolve()
    snap_override = opts.local_snapshot.expanduser().resolve() if opts.local_snapshot else None

    # Fail early with install instructions when the hub client is required.
    if snap_override is None:
        try:
            import huggingface_hub  # noqa: F401
        except ImportError:
            print("ERROR: huggingface_hub is required. Install with:", file=sys.stderr)
            print(" pip install huggingface_hub", file=sys.stderr)
            return 2

    vids = read_subject_ids(opts.split, opts.repo_id, opts.hf_cache, snap_override)

    # Resolve the snapshot root: local dir, pre-populated cache, or fresh download.
    if snap_override is not None:
        print(f"[2/5] Using local snapshot at {snap_override} (no download)")
        snap_root = snap_override
    elif opts.skip_download:
        print("[2/5] --skip_download: assuming local snapshot is already populated")
        from huggingface_hub import snapshot_download
        snap_root = Path(snapshot_download(
            repo_id=opts.repo_id,
            repo_type=REPO_TYPE,
            allow_patterns=["dataset_frames.json"],
            cache_dir=str(opts.hf_cache) if opts.hf_cache else None,
        ))
    else:
        snap_root = download_subject_files(
            repo_id=opts.repo_id,
            hf_cache=opts.hf_cache,
            subject_ids=vids,
        )

    merge_tracking(snap_root, vids, out_dir)

    if opts.skip_extract:
        print("[4/5] --skip_extract: skipping frames/mattes setup")
        frames_dir = out_dir / "frames_root"
        mattes_dir = out_dir / "matte_root"
    else:
        frames_dir, mattes_dir = setup_frame_dirs(snap_root, vids, out_dir, opts.keep_tars)

    write_extra_info(out_dir, frames_dir, mattes_dir)

    print()
    print("=" * 60)
    print(f"DONE. Local dataset assembled at: {out_dir}")
    print(f" Point training config at: DATASET.data_path: {out_dir}")
    return 0
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
if __name__ == "__main__":
    # SystemExit propagates main()'s int return value as the process exit code.
    raise SystemExit(main())
|
metadata/base/-2Dj9M71JAc.jpg
ADDED
|
Git LFS Details
|
metadata/base/-E97Kgi0sR4.jpg
ADDED
|
Git LFS Details
|
metadata/base/-FOCpMAww28.jpg
ADDED
|
Git LFS Details
|
metadata/base/-H1tUMRJoeo-Scenes.jpg
ADDED
|
Git LFS Details
|
metadata/base/-H1tUMRJoeo.jpg
ADDED
|
Git LFS Details
|
metadata/base/-I3e6Mkfp7M.jpg
ADDED
|
Git LFS Details
|
metadata/base/-MTRxRO5SRA.jpg
ADDED
|
Git LFS Details
|
metadata/base/-RkhAP0_ms4.jpg
ADDED
|
Git LFS Details
|
metadata/base/-Z-ul0GzzM4.jpg
ADDED
|
Git LFS Details
|
metadata/base/-aouBn7IKIo.jpg
ADDED
|
Git LFS Details
|
metadata/base/-eBUcBfkVCo.jpg
ADDED
|
Git LFS Details
|
metadata/base/-hY9QSdaReY.jpg
ADDED
|
Git LFS Details
|
metadata/base/-mhe7COLiR0.jpg
ADDED
|
Git LFS Details
|
metadata/base/-moW9jvvMr4.jpg
ADDED
|
Git LFS Details
|
metadata/base/-nKdufEaL8k.jpg
ADDED
|
Git LFS Details
|
metadata/base/-vZXgApsPCQ.jpg
ADDED
|
Git LFS Details
|
metadata/base/-vqV-gHa2FE.jpg
ADDED
|
Git LFS Details
|
metadata/base/0-FkPxSc_M4.jpg
ADDED
|
Git LFS Details
|
metadata/base/05jJodDVJRQ.jpg
ADDED
|
Git LFS Details
|
metadata/base/08ZWROqoTZo.jpg
ADDED
|
Git LFS Details
|
metadata/base/0DHywidLX6A.jpg
ADDED
|
Git LFS Details
|
metadata/base/0FQXicAGy5U.jpg
ADDED
|
Git LFS Details
|
metadata/base/0Fi83BHQsMA.jpg
ADDED
|
Git LFS Details
|
metadata/base/0JGarsZE1rk.jpg
ADDED
|
Git LFS Details
|
metadata/base/0K5OO2ybueM.jpg
ADDED
|
Git LFS Details
|
metadata/base/0MD4Ymjyc2I.jpg
ADDED
|
Git LFS Details
|
metadata/base/0NV1KdWRHck.jpg
ADDED
|
Git LFS Details
|
metadata/base/0PAy1zBtTbw.jpg
ADDED
|
Git LFS Details
|
metadata/base/0R9zjn9BBvA.jpg
ADDED
|
Git LFS Details
|
metadata/base/0SkdP36wiAU.jpg
ADDED
|
Git LFS Details
|
metadata/base/0bRocfcPhHU.jpg
ADDED
|
Git LFS Details
|
metadata/base/0d6iSvF1UmA.jpg
ADDED
|
Git LFS Details
|
metadata/base/0g0S34XE2b8.jpg
ADDED
|
Git LFS Details
|
metadata/base/0g2WE1qXiKM.jpg
ADDED
|
Git LFS Details
|
metadata/base/0gMCZFHv9v8.jpg
ADDED
|
Git LFS Details
|
metadata/base/0gks6ceq4eQ.jpg
ADDED
|
Git LFS Details
|
metadata/base/0iIh5YYDR2o.jpg
ADDED
|
Git LFS Details
|
metadata/base/0txtVkBUdSQ.jpg
ADDED
|
Git LFS Details
|
metadata/base/0ygtX2nyexo.jpg
ADDED
|
Git LFS Details
|
metadata/base/13rqtiAPISY.jpg
ADDED
|
Git LFS Details
|
metadata/base/16cM-RFid9U.jpg
ADDED
|
Git LFS Details
|
metadata/base/16p9YRF0l-g.jpg
ADDED
|
Git LFS Details
|
metadata/base/18zvlz5CxPE.jpg
ADDED
|
Git LFS Details
|
metadata/base/1AT5klu_yAQ.jpg
ADDED
|
Git LFS Details
|