initialneil committed
Commit 52496f8 · 0 parents

Squash history (QC grids added; final preview state)

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. .gitattributes +60 -0
  2. LICENSE +42 -0
  3. README.md +172 -0
  4. README_preview.md +174 -0
  5. dataset_frames.json +0 -0
  6. load_tedwb1k.py +327 -0
  7. metadata/base/-2Dj9M71JAc.jpg +3 -0
  8. metadata/base/-E97Kgi0sR4.jpg +3 -0
  9. metadata/base/-FOCpMAww28.jpg +3 -0
  10. metadata/base/-H1tUMRJoeo-Scenes.jpg +3 -0
  11. metadata/base/-H1tUMRJoeo.jpg +3 -0
  12. metadata/base/-I3e6Mkfp7M.jpg +3 -0
  13. metadata/base/-MTRxRO5SRA.jpg +3 -0
  14. metadata/base/-RkhAP0_ms4.jpg +3 -0
  15. metadata/base/-Z-ul0GzzM4.jpg +3 -0
  16. metadata/base/-aouBn7IKIo.jpg +3 -0
  17. metadata/base/-eBUcBfkVCo.jpg +3 -0
  18. metadata/base/-hY9QSdaReY.jpg +3 -0
  19. metadata/base/-mhe7COLiR0.jpg +3 -0
  20. metadata/base/-moW9jvvMr4.jpg +3 -0
  21. metadata/base/-nKdufEaL8k.jpg +3 -0
  22. metadata/base/-vZXgApsPCQ.jpg +3 -0
  23. metadata/base/-vqV-gHa2FE.jpg +3 -0
  24. metadata/base/0-FkPxSc_M4.jpg +3 -0
  25. metadata/base/05jJodDVJRQ.jpg +3 -0
  26. metadata/base/08ZWROqoTZo.jpg +3 -0
  27. metadata/base/0DHywidLX6A.jpg +3 -0
  28. metadata/base/0FQXicAGy5U.jpg +3 -0
  29. metadata/base/0Fi83BHQsMA.jpg +3 -0
  30. metadata/base/0JGarsZE1rk.jpg +3 -0
  31. metadata/base/0K5OO2ybueM.jpg +3 -0
  32. metadata/base/0MD4Ymjyc2I.jpg +3 -0
  33. metadata/base/0NV1KdWRHck.jpg +3 -0
  34. metadata/base/0PAy1zBtTbw.jpg +3 -0
  35. metadata/base/0R9zjn9BBvA.jpg +3 -0
  36. metadata/base/0SkdP36wiAU.jpg +3 -0
  37. metadata/base/0bRocfcPhHU.jpg +3 -0
  38. metadata/base/0d6iSvF1UmA.jpg +3 -0
  39. metadata/base/0g0S34XE2b8.jpg +3 -0
  40. metadata/base/0g2WE1qXiKM.jpg +3 -0
  41. metadata/base/0gMCZFHv9v8.jpg +3 -0
  42. metadata/base/0gks6ceq4eQ.jpg +3 -0
  43. metadata/base/0iIh5YYDR2o.jpg +3 -0
  44. metadata/base/0txtVkBUdSQ.jpg +3 -0
  45. metadata/base/0ygtX2nyexo.jpg +3 -0
  46. metadata/base/13rqtiAPISY.jpg +3 -0
  47. metadata/base/16cM-RFid9U.jpg +3 -0
  48. metadata/base/16p9YRF0l-g.jpg +3 -0
  49. metadata/base/18zvlz5CxPE.jpg +3 -0
  50. metadata/base/1AT5klu_yAQ.jpg +3 -0
.gitattributes ADDED
@@ -0,0 +1,60 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.avro filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
*.mds filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
# Video files - compressed
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,42 @@
Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC-BY-NC-ND-4.0)

Copyright (c) 2026 neil

This dataset is licensed under the Creative Commons Attribution-NonCommercial-NoDerivatives
4.0 International License. To view a copy of this license, visit

    https://creativecommons.org/licenses/by-nc-nd/4.0/

or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.

You are free to:
- Share — copy and redistribute the material in any medium or format

Under the following terms:
- Attribution — You must give appropriate credit, provide a link to the
  license, and indicate if changes were made. You may do so in any reasonable
  manner, but not in any way that suggests the licensor endorses you or your use.
- NonCommercial — You may not use the material for commercial purposes.
- NoDerivatives — If you remix, transform, or build upon the material, you may
  not distribute the modified material.
- No additional restrictions — You may not apply legal terms or technological
  measures that legally restrict others from doing anything the license permits.

------------------------------------------------------------------------------
SOURCE-MATERIAL NOTICE
------------------------------------------------------------------------------
This dataset is derived from videos published on https://www.ted.com which are
themselves licensed under CC-BY-NC-ND 4.0 by TED Conferences, LLC. The tracking
parameters, JPG frames, and alpha mattes in this dataset are derivative works of
those videos. This dataset matches the upstream license (CC-BY-NC-ND 4.0) and
inherits the same restrictions on the source material:

- You may NOT use this dataset for commercial purposes.
- You MUST credit "TED" as the source of the original video content in any
  publication, demo, or derivative work.
- You SHOULD link back to the original TED talk page for any individual
  speaker whose data you use in a publication, demo, or derivative work.
- You may NOT redistribute modified or derivative versions of this dataset.

This notice does not constitute legal advice. If your intended use is unclear,
consult a lawyer or contact TED directly.
README.md ADDED
@@ -0,0 +1,172 @@
---
license: cc-by-nc-nd-4.0
language:
- en
pretty_name: TEDWB1k-preview
size_categories:
- 1K<n<10K
task_categories:
- other
tags:
- 3d-human
- smpl-x
- flame
- avatar
- gaussian-splatting
- video
- motion-capture
- ted
- preview
configs:
- config_name: subjects
  data_files:
  - split: train
    path: metadata/subjects_train.parquet
  - split: train_subset_x1
    path: metadata/subjects_train_subset_x1.parquet
  - split: train_subset_x12
    path: metadata/subjects_train_subset_x12.parquet
  - split: train_val
    path: metadata/subjects_train_val.parquet
  - split: test
    path: metadata/subjects_test.parquet
---

# TEDWB1k-preview

> ⚠️ **This is the public preview of [`initialneil/TEDWB1k`](https://huggingface.co/datasets/initialneil/TEDWB1k).**
> The full dataset (1,431 TED-talk speakers, ~120 GB) is hosted at the gated repo above.
> This preview repo exists so the HuggingFace **Dataset Viewer** can render the
> per-subject thumbnails and tracking grids without going through the gated EULA flow.
>
> If you want the **full training data** (frames + mattes + per-frame SMPL-X / FLAME
> tracking for all 1,431 subjects), go to the gated main repo, accept the agreement,
> and use [`load_tedwb1k.py`](https://huggingface.co/datasets/initialneil/TEDWB1k/blob/main/load_tedwb1k.py).

## What's in this preview

This repo is **schema-identical** to the gated main repo, but the heavy
per-subject data (`frames.tar`, `mattes.tar`, tracking pickles) is included
**only for the 12 subjects in `train_subset_x12`** as a working sample. For
the other 1,419 subjects, only the metadata and QC visualizations are
present (so the viewer table still lists all 1,431 entries).

| What | Subjects | Size |
|---|---:|---:|
| Per-split parquets with embedded source-frame thumbnails | 1,431 | ~210 MB |
| `metadata/previews/<id>.jpg` (1024×1024 source frames) | 1,431 | ~210 MB |
| `metadata/ehm/<id>.jpg` (full-res SMPL-X overlay grids) | 1,431 | ~17.6 GB |
| `metadata/flame/<id>.jpg` (full-res FLAME overlay grids) | 1,431 | ~8.0 GB |
| `metadata/base/<id>.jpg` (full-res PIXIE+Sapiens grids) | 1,431 | ~5.2 GB |
| **Per-subject heavy data** (frames.tar + mattes.tar + tracking) | **12** | ~540 MB |
| **Total preview** | | **~32 GB** |

The HF Dataset Viewer above renders 5 tabs (`train`, `train_subset_x1`,
`train_subset_x12`, `train_val`, `test`) with one row per subject, the
per-subject frame and shot counts, and a thumbnail of the first source
frame. Each thumbnail is the actual `shots_images/<id>/<first_shot>/000000.jpg`
that the tracker consumed.
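
The same tables can be loaded programmatically. A minimal sketch with 🤗 `datasets`, relying only on the `subjects` config declared in the YAML header above:

```python
from datasets import load_dataset

# One row per subject; config and split names come from the YAML front matter.
subjects = load_dataset("initialneil/TEDWB1k-preview", "subjects", split="test")
print(subjects.num_rows)      # 70 subjects in the test split
print(subjects.column_names)  # per-subject metadata columns + embedded thumbnail
```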

## Quick start

If you only want to play with one of the 12 sample subjects (no agreement
required):

```bash
pip install huggingface_hub

python -c "
from huggingface_hub import snapshot_download
snapshot_download(
    'initialneil/TEDWB1k-preview',
    repo_type='dataset',
    allow_patterns='subjects/-2Dj9M71JAc/*',
    local_dir='./tedwb1k_x1',
)
"
```

That gives you tracking pickles + `frames.tar` + `mattes.tar` for one
sample subject in a few seconds. To turn it into the 5-file bundle that
[HolisticAvatar](https://github.com/initialneil/HolisticAvatar)'s
`TrackedData` expects, use the same `load_tedwb1k.py` from the main repo:

```bash
wget https://huggingface.co/datasets/initialneil/TEDWB1k/raw/main/load_tedwb1k.py
python load_tedwb1k.py --split train_subset_x1 --out ./tedwb1k_x1 \
    --repo_id initialneil/TEDWB1k-preview
```

For the **full 1,361-subject training set**, request access at the
[gated main repo](https://huggingface.co/datasets/initialneil/TEDWB1k).

## Per-subject visualizations

Each of the 1,431 subjects has 4 standalone visualization files under
`metadata/`:

- `metadata/previews/<id>.jpg` — clean 1024×1024 source frame (the first
  frame of the first shot). Embedded in the parquet preview column too.
- `metadata/ehm/<id>.jpg` — full-resolution SMPL-X overlay grid from the
  final tracking stage (large vertical contact sheet, ~13 MB).
- `metadata/flame/<id>.jpg` — FLAME overlay grid from the intermediate
  face-fitting stage (~6 MB).
- `metadata/base/<id>.jpg` — stage-1 PIXIE+Sapiens overlay grid (~4 MB).

You can fetch any single subject's visualization with one
`hf_hub_download` call:

```python
from huggingface_hub import hf_hub_download
path = hf_hub_download(
    'initialneil/TEDWB1k-preview',
    'metadata/ehm/05jJodDVJRQ.jpg',
    repo_type='dataset',
)
```

## Splits

Same as the main repo:

| Split | Subjects | Notes |
|---|---:|---|
| `train_subset_x1` | 1 | tiny single-subject overfit (⊂ `train`) |
| `train_subset_x12` | 12 | small overfit (⊂ `train`) — **the only subjects with downloadable heavy data in this preview** |
| `train_val` | 20 | training monitor (⊂ `train`) |
| `test` | 70 | identity-disjoint evaluation |
| `train` | 1,361 | full training pool |
| **total** | **1,431** | |

`train` (1,361) and `test` (70) are identity-disjoint and together cover
all 1,431 subjects. `train_subset_x1`, `train_subset_x12`, and `train_val`
are subsets of `train`.
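
These properties can be checked directly against the plain-text id lists that the gated main repo ships per split (the same `train.txt` / `test.txt` files that `load_tedwb1k.py` reads). A minimal sketch, assuming you have accepted the gating EULA:

```python
from huggingface_hub import hf_hub_download

def split_ids(name: str) -> set[str]:
    # One subject id per line (see SPLIT_FILES in load_tedwb1k.py).
    path = hf_hub_download("initialneil/TEDWB1k", name, repo_type="dataset")
    return {ln.strip() for ln in open(path) if ln.strip()}

train_ids, test_ids = split_ids("train.txt"), split_ids("test.txt")
assert not (train_ids & test_ids)          # identity-disjoint
assert len(train_ids | test_ids) == 1431   # together cover all subjects
```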

## Why two repos?

HuggingFace's Dataset Viewer cannot render tabs/thumbnails for **gated**
datasets — the worker that computes split names runs without a user
identity and can't satisfy the gating EULA. The full TEDWB1k is gated for
TED-content compliance, so to keep the viewer working we mirror the
metadata + a 12-subject sample to this public preview repo.

For the full discussion, see this thread:
<https://discuss.huggingface.co/t/after-gated-user-access-was-enabled-the-huggingface-not-showing-dataset-viewer/157333>

## License

[**CC-BY-NC-ND 4.0**](https://creativecommons.org/licenses/by-nc-nd/4.0/) —
same as the main repo. Non-commercial research use only. Attribution
required. **No derivatives** — you may not distribute modified or remixed
versions of this dataset.

The tracking parameters, JPG frames, and mattes are all derived works of
TED talk videos that are themselves CC-BY-NC-ND on ted.com. This dataset
matches the upstream license to remain compatible with TED's source
restrictions.

## Links

- **Full gated dataset**: <https://huggingface.co/datasets/initialneil/TEDWB1k>
- Tracking pipeline: <https://github.com/initialneil/HolisticTracker>
- HolisticAvatar (downstream model): <https://github.com/initialneil/HolisticAvatar>
README_preview.md ADDED
@@ -0,0 +1,174 @@
---
license: cc-by-nc-nd-4.0
language:
- en
pretty_name: TEDWB1k-preview
size_categories:
- 1K<n<10K
task_categories:
- other
tags:
- 3d-human
- smpl-x
- flame
- avatar
- gaussian-splatting
- video
- motion-capture
- ted
- preview
configs:
- config_name: subjects
  data_files:
  - split: train
    path: metadata/subjects_train.parquet
  - split: train_subset_x1
    path: metadata/subjects_train_subset_x1.parquet
  - split: train_subset_x12
    path: metadata/subjects_train_subset_x12.parquet
  - split: train_val
    path: metadata/subjects_train_val.parquet
  - split: test
    path: metadata/subjects_test.parquet
---

# TEDWB1k-preview

> ⚠️ **This is the public preview of [`initialneil/TEDWB1k`](https://huggingface.co/datasets/initialneil/TEDWB1k).**
> The full dataset (1,431 TED-talk speakers, ~120 GB) is hosted at the gated repo above.
> This preview repo exists so the HuggingFace **Dataset Viewer** can render the
> per-subject thumbnails and tracking grids without going through the gated EULA flow.
>
> If you want the **full training data** (frames + mattes + per-frame SMPL-X / FLAME
> tracking for all 1,431 subjects), go to the gated main repo, accept the agreement,
> and use [`load_tedwb1k.py`](https://huggingface.co/datasets/initialneil/TEDWB1k/blob/main/load_tedwb1k.py).

## What's in this preview

This repo is **schema-identical** to the gated main repo, but it's a lightweight
**catalog + sample** rather than a full mirror:

| What | Subjects | Size |
|---|---:|---:|
| Per-split parquets with embedded source-frame thumbnails | 1,431 | ~210 MB |
| `metadata/previews/<id>.jpg` (1024×1024 source frames) | 1,431 | ~210 MB |
| **Per-subject heavy data** (frames.tar + mattes.tar + tracking) | **12** | ~540 MB |
| **Total preview** | | **~1 GB** |

The HF Dataset Viewer above renders 5 tabs (`train`, `train_subset_x1`,
`train_subset_x12`, `train_val`, `test`) with one row per subject, the
per-subject frame and shot counts, and a thumbnail of the first source
frame (the actual `shots_images/<id>/<first_shot>/000000.jpg` that the
tracker consumed).

For full-resolution per-subject tracking visualizations
(`metadata/ehm/<id>.jpg`, `metadata/flame/<id>.jpg`, `metadata/base/<id>.jpg`),
go to the gated main repo — they're fetchable per-subject without downloading
the heavy `frames.tar`/`mattes.tar`.

## Quick start

If you only want to play with one of the 12 sample subjects (no agreement
required):

```bash
pip install huggingface_hub

python -c "
from huggingface_hub import snapshot_download
snapshot_download(
    'initialneil/TEDWB1k-preview',
    repo_type='dataset',
    allow_patterns='subjects/-2Dj9M71JAc/*',
    local_dir='./tedwb1k_x1',
)
"
```

That gives you tracking pickles + `frames.tar` + `mattes.tar` for one
sample subject in a few seconds. To turn it into the 5-file bundle that
[HolisticAvatar](https://github.com/initialneil/HolisticAvatar)'s
`TrackedData` expects, use the same `load_tedwb1k.py` from the main repo:

```bash
wget https://huggingface.co/datasets/initialneil/TEDWB1k/raw/main/load_tedwb1k.py
python load_tedwb1k.py --split train_subset_x1 --out ./tedwb1k_x1 \
    --repo_id initialneil/TEDWB1k-preview
```

For the **full 1,361-subject training set**, request access at the
[gated main repo](https://huggingface.co/datasets/initialneil/TEDWB1k).

## Per-subject visualizations

In this preview, each of the 1,431 subjects has **one** standalone
visualization file:

- `metadata/previews/<id>.jpg` — clean 1024×1024 source frame (the first
  frame of the first shot). Also embedded in the parquet `preview` column
  so the HF Dataset Viewer renders it inline.
+ For full-resolution **SMPL-X / FLAME / PIXIE+Sapiens overlay grids**, head
112
+ to the gated main repo where each subject also has:
113
+
114
+ - `metadata/ehm/<id>.jpg` — final SMPL-X overlay grid (~13 MB)
115
+ - `metadata/flame/<id>.jpg` — intermediate FLAME overlay grid (~6 MB)
116
+ - `metadata/base/<id>.jpg` — stage-1 PIXIE+Sapiens overlay grid (~4 MB)
117
+
118
+ You can fetch any single subject's full-res visualization from the main
119
+ repo (after accepting the gating EULA) with one `hf_hub_download` call:
120
+
121
+ ```python
122
+ from huggingface_hub import hf_hub_download
123
+ path = hf_hub_download(
124
+ 'initialneil/TEDWB1k',
125
+ 'metadata/ehm/05jJodDVJRQ.jpg',
126
+ repo_type='dataset',
127
+ )
128
+ ```
129
+
130
+ ## Splits
131
+
132
+ Same as the main repo:
133
+
134
+ | Split | Subjects | Notes |
135
+ |---|---:|---|
136
+ | `train_subset_x1` | 1 | tiny single-subject overfit (⊂ `train`) |
137
+ | `train_subset_x12` | 12 | small overfit (⊂ `train`) — **the only subjects with downloadable heavy data in this preview** |
138
+ | `train_val` | 20 | training monitor (⊂ `train`) |
139
+ | `test` | 70 | identity-disjoint evaluation |
140
+ | `train` | 1,361 | full training pool |
141
+ | **total** | **1,431** | |
142
+
143
+ `train` (1,361) and `test` (70) are identity-disjoint and together cover
144
+ all 1,431 subjects. `train_subset_x1`, `train_subset_x12`, and `train_val`
145
+ are subsets of `train`.
146
+
147
+ ## Why two repos?
148
+
149
+ HuggingFace's Dataset Viewer cannot render tabs/thumbnails for **gated**
150
+ datasets — the worker that computes split names runs without a user
151
+ identity and can't satisfy the gating EULA. The full TEDWB1k is gated for
152
+ TED-content compliance, so to keep the viewer working we mirror the
153
+ metadata + a 12-subject sample to this public preview repo.
154
+
155
+ For full discussion see this thread:
156
+ <https://discuss.huggingface.co/t/after-gated-user-access-was-enabled-the-huggingface-not-showing-dataset-viewer/157333>
157
+
158
+ ## License
159
+
160
+ [**CC-BY-NC-ND 4.0**](https://creativecommons.org/licenses/by-nc-nd/4.0/) —
161
+ same as the main repo. Non-commercial research use only. Attribution
162
+ required. **No derivatives** — you may not distribute modified or remixed
163
+ versions of this dataset.
164
+
165
+ The tracking parameters, JPG frames, and mattes are all derived works of
166
+ TED talk videos that are themselves CC-BY-NC-ND on ted.com. This dataset
167
+ matches the upstream license to remain compatible with TED's source
168
+ restrictions.
169
+
170
+ ## Links
171
+
172
+ - **Full gated dataset**: <https://huggingface.co/datasets/initialneil/TEDWB1k>
173
+ - Tracking pipeline: <https://github.com/initialneil/HolisticTracker>
174
+ - HolisticAvatar (downstream model): <https://github.com/initialneil/HolisticAvatar>
dataset_frames.json ADDED
The diff for this file is too large to render. See raw diff
 
load_tedwb1k.py ADDED
@@ -0,0 +1,327 @@
#!/usr/bin/env python
"""Download and assemble a TEDWB1k split for HolisticAvatar.

Usage examples
--------------
# Smallest possible test (1 subject, ~80 MB):
python load_tedwb1k.py --split train_subset_x1 --out ~/data/tedwb1k_x1

# 12-subject overfit set (~1 GB):
python load_tedwb1k.py --split train_subset_x12 --out ~/data/tedwb1k_x12

# 20-subject training-monitor set (~2 GB, subset of train):
python load_tedwb1k.py --split train_val --out ~/data/tedwb1k_train_val

# 70-subject test set (~10 GB):
python load_tedwb1k.py --split test --out ~/data/tedwb1k_test

# Full training pool (1361 subjects, ~190 GB):
python load_tedwb1k.py --split train --out ~/data/tedwb1k_train

# Use already-downloaded HF cache, skip re-download:
python load_tedwb1k.py --split test --out ~/data/tedwb1k_test --hf_cache ~/.cache/huggingface

After it finishes, point your training config at --out:
    DATASET.data_path: <out>

The directory `<out>` will contain the same five files HolisticAvatar's
`TrackedData.__init__` expects:
    optim_tracking_ehm.pkl   # merged from per-subject pkls
    id_share_params.pkl      # merged from per-subject pkls
    videos_info.json         # merged from per-subject jsons
    dataset_frames.json      # copied from the release root
    extra_info.json          # generated locally with absolute frames_root/matte_root

…plus `frames_root/<vid>/...` and `matte_root/<vid>/...` containing the per-shot
JPGs that the dataloader reads at training time.
"""

from __future__ import annotations

import argparse
import json
import os
import pickle
import sys
import time
from pathlib import Path

REPO_ID = "initialneil/TEDWB1k"
REPO_TYPE = "dataset"

SPLIT_FILES = {
    "train": "train.txt",
    "train_subset_x1": "train_subset_x1.txt",
    "train_subset_x12": "train_subset_x12.txt",
    "train_val": "train_val.txt",
    "test": "test.txt",
}

# Per-subject files we always need to feed TrackedData:
PER_SUBJECT_TRACKING = [
    "tracking/optim_tracking_ehm.pkl",
    "tracking/id_share_params.pkl",
    "tracking/videos_info.json",
]


def parse_args() -> argparse.Namespace:
    ap = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    ap.add_argument("--split", required=True, choices=list(SPLIT_FILES.keys()),
                    help="Which subject set to download.")
    ap.add_argument("--out", required=True, type=Path,
                    help="Local directory to assemble the dataset into.")
    ap.add_argument("--repo_id", default=REPO_ID,
                    help=f"HuggingFace dataset repo id (default: {REPO_ID}).")
    ap.add_argument("--hf_cache", type=Path, default=None,
                    help="Override HuggingFace cache dir (default: ~/.cache/huggingface).")
    ap.add_argument("--keep_tars", action="store_true",
                    help="Keep frames.tar / mattes.tar after extraction (default: delete to save space).")
    ap.add_argument("--skip_download", action="store_true",
                    help="Skip download step (assume HF cache is already populated).")
    ap.add_argument("--skip_extract", action="store_true",
                    help="Skip frames/mattes extraction (just merge tracking pkls).")
    ap.add_argument("--local_snapshot", type=Path, default=None,
                    help="Skip HF download entirely; treat this local dir as the snapshot. "
                         "Useful for testing build_release.py output before upload, or if "
                         "the user already has a clone of the repo.")
    return ap.parse_args()


def read_subject_ids(
    split_name: str,
    repo_id: str,
    hf_cache: Path | None,
    local_snapshot: Path | None,
) -> list[str]:
    """Fetch and parse the split txt for the chosen split."""
    txt_name = SPLIT_FILES[split_name]
    if local_snapshot is not None:
        local_txt = local_snapshot / txt_name
        if not local_txt.exists():
            raise FileNotFoundError(f"{local_txt} not found in local snapshot")
        print(f"[1/5] Reading split file {txt_name} from local snapshot ...")
    else:
        from huggingface_hub import hf_hub_download
        print(f"[1/5] Fetching split file {txt_name} from {repo_id} ...")
        local_txt = Path(hf_hub_download(
            repo_id=repo_id,
            filename=txt_name,
            repo_type=REPO_TYPE,
            cache_dir=str(hf_cache) if hf_cache else None,
        ))
    ids = [ln.strip() for ln in Path(local_txt).read_text().splitlines() if ln.strip()]
    print(f"    {len(ids)} subject ids in '{split_name}'")
    return ids


def download_subject_files(
    repo_id: str,
    hf_cache: Path | None,
    subject_ids: list[str],
) -> Path:
    """Snapshot only the subject files we need. Returns the snapshot root."""
    from huggingface_hub import snapshot_download

    patterns: list[str] = []
    for vid in subject_ids:
        for f in PER_SUBJECT_TRACKING:
            patterns.append(f"subjects/{vid}/{f}")
        patterns.append(f"subjects/{vid}/frames.tar")
        patterns.append(f"subjects/{vid}/mattes.tar")
    # Always grab dataset_frames.json (used for train/valid frame split inside TrackedData)
    patterns.append("dataset_frames.json")

    print(f"[2/5] snapshot_download from {repo_id} ({len(patterns)} patterns) ...")
    snap = snapshot_download(
        repo_id=repo_id,
        repo_type=REPO_TYPE,
        allow_patterns=patterns,
        cache_dir=str(hf_cache) if hf_cache else None,
    )
    print(f"    snapshot at: {snap}")
    return Path(snap)


def merge_tracking(
    snapshot: Path,
    subject_ids: list[str],
    out: Path,
) -> None:
    """Merge per-subject tracking files into the 5-file TrackedData bundle.

    Per-subject `optim_tracking_ehm.pkl` and `id_share_params.pkl` are FLAT
    (no top-level video_id key) — the merger wraps them under each video_id
    so the result matches the format produced by `merge_ehmx_dataset.py`.
    """
    print(f"[3/5] Merging tracking files for {len(subject_ids)} subjects ...")
    merged_optim: dict = {}
    merged_id_share: dict = {}
    merged_videos_info: dict = {}
    n_frames_total = 0
    missing: list[str] = []

    t0 = time.time()
    for i, vid in enumerate(subject_ids, 1):
        sub = snapshot / "subjects" / vid / "tracking"
        opt_p = sub / "optim_tracking_ehm.pkl"
        id_p = sub / "id_share_params.pkl"
        vi_p = sub / "videos_info.json"
        if not (opt_p.exists() and id_p.exists() and vi_p.exists()):
            missing.append(vid)
            continue
        with open(opt_p, "rb") as f:
            merged_optim[vid] = pickle.load(f)
        with open(id_p, "rb") as f:
            merged_id_share[vid] = pickle.load(f)
        with open(vi_p, "r") as f:
            vi = json.load(f)
        merged_videos_info.update(vi)
        n_frames_total += len(merged_optim[vid])
        if i % 50 == 0 or i == len(subject_ids):
            elapsed = time.time() - t0
            print(f"    merged {i}/{len(subject_ids)} subjects "
                  f"({n_frames_total} frames so far, {elapsed:.1f}s)")

    if missing:
        print(f"    WARNING: {len(missing)} subjects had missing tracking files: {missing[:5]}...", file=sys.stderr)

    out.mkdir(parents=True, exist_ok=True)
    with open(out / "optim_tracking_ehm.pkl", "wb") as f:
        pickle.dump(merged_optim, f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(out / "id_share_params.pkl", "wb") as f:
        pickle.dump(merged_id_share, f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(out / "videos_info.json", "w") as f:
        json.dump(merged_videos_info, f)
    print(f"    wrote optim_tracking_ehm.pkl ({n_frames_total} frames)")
    print(f"    wrote id_share_params.pkl ({len(merged_id_share)} subjects)")
    print(f"    wrote videos_info.json ({len(merged_videos_info)} subjects)")

    # Copy dataset_frames.json from snapshot (used by train/valid splits inside TrackedData)
    src_frames = snapshot / "dataset_frames.json"
    if src_frames.exists():
        out_frames = out / "dataset_frames.json"
        out_frames.write_text(src_frames.read_text())
        print("    copied dataset_frames.json")
    else:
        print("    WARNING: dataset_frames.json missing in snapshot — train/valid splits won't work")


def setup_frame_dirs(
    snapshot: Path,
    subject_ids: list[str],
    out: Path,
    keep_tars: bool,
) -> tuple[Path, Path]:
    """Materialize per-subject frames + mattes under out/frames_root, out/matte_root.

    Handles both layouts:
    - Snapshot has `subjects/<vid>/frames.tar` (HF upload case): extract into
      out/frames_root/<vid>/ and (optionally) delete the tar to save disk.
    - Snapshot has `subjects/<vid>/frames/` as a real dir or symlink (local
      build_release.py output, or pre-extracted clone): symlink it from
      out/frames_root/<vid> -> resolved frames dir.
    """
    import tarfile

    frames_root = out / "frames_root"
    matte_root = out / "matte_root"
    frames_root.mkdir(parents=True, exist_ok=True)
    matte_root.mkdir(parents=True, exist_ok=True)

    print(f"[4/5] Setting up frames + mattes for {len(subject_ids)} subjects ...")
    n_extracted = n_linked = n_missing = 0
    for vid in subject_ids:
        sub = snapshot / "subjects" / vid
        for kind, dest_root in [("frames", frames_root), ("mattes", matte_root)]:
            tar_path = sub / f"{kind}.tar"
            dir_path = sub / kind
            target = dest_root / vid
            if target.exists() or target.is_symlink():
                continue  # idempotent
            if tar_path.exists():
                target.mkdir(parents=True, exist_ok=True)
                with tarfile.open(tar_path, "r") as tar:
                    tar.extractall(path=target)
                if not keep_tars:
                    tar_path.unlink()
                n_extracted += 1
            elif dir_path.exists():
                # Resolve through any symlinks so the link in out/ is stable.
                target.symlink_to(dir_path.resolve())
                n_linked += 1
            else:
                print(f"    WARNING: {vid}/{kind} not in snapshot (no .tar, no dir)", file=sys.stderr)
                n_missing += 1
    print(f"    extracted={n_extracted // 2} linked={n_linked // 2} missing={n_missing}")
    return frames_root, matte_root


def write_extra_info(out: Path, frames_root: Path, matte_root: Path) -> None:
    """Write extra_info.json with absolute paths to the local extracted dirs."""
    print("[5/5] Writing extra_info.json ...")
    extra = {
        "frames_root": str(frames_root.resolve()),
        "matte_root": str(matte_root.resolve()),
        "pshuman_root": None,
    }
    with open(out / "extra_info.json", "w") as f:
        json.dump(extra, f, indent=2)
    print(f"    frames_root = {extra['frames_root']}")
    print(f"    matte_root = {extra['matte_root']}")


def main() -> int:
    args = parse_args()
    out = args.out.expanduser().resolve()
    local_snapshot = args.local_snapshot.expanduser().resolve() if args.local_snapshot else None

    if local_snapshot is None:
        try:
            import huggingface_hub  # noqa: F401
        except ImportError:
            print("ERROR: huggingface_hub is required. Install with:", file=sys.stderr)
            print("    pip install huggingface_hub", file=sys.stderr)
            return 2

    subject_ids = read_subject_ids(args.split, args.repo_id, args.hf_cache, local_snapshot)

    if local_snapshot is not None:
        print(f"[2/5] Using local snapshot at {local_snapshot} (no download)")
        snapshot = local_snapshot
    elif args.skip_download:
        print("[2/5] --skip_download: assuming local snapshot is already populated")
        from huggingface_hub import snapshot_download
        snapshot = Path(snapshot_download(
            repo_id=args.repo_id,
            repo_type=REPO_TYPE,
            allow_patterns=["dataset_frames.json"],
            cache_dir=str(args.hf_cache) if args.hf_cache else None,
        ))
    else:
        snapshot = download_subject_files(
            repo_id=args.repo_id,
            hf_cache=args.hf_cache,
            subject_ids=subject_ids,
        )

    merge_tracking(snapshot, subject_ids, out)

    if args.skip_extract:
        print("[4/5] --skip_extract: skipping frames/mattes setup")
        frames_root = out / "frames_root"
        matte_root = out / "matte_root"
    else:
        frames_root, matte_root = setup_frame_dirs(snapshot, subject_ids, out, args.keep_tars)

    write_extra_info(out, frames_root, matte_root)

    print()
    print("=" * 60)
    print(f"DONE. Local dataset assembled at: {out}")
    print(f"    Point training config at: DATASET.data_path: {out}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
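
Once a split has been assembled, a quick way to sanity-check the resulting bundle is to open the merged files directly. A minimal sketch — the key layout follows `merge_tracking` above, and `~/data/tedwb1k_x1` is just the example `--out` path from the usage docstring:

```python
import json
import pickle
from pathlib import Path

out = Path("~/data/tedwb1k_x1").expanduser()

# merge_tracking wraps each per-subject pickle under its video_id.
with open(out / "optim_tracking_ehm.pkl", "rb") as f:
    optim = pickle.load(f)
print(f"{len(optim)} subjects:", list(optim)[:3])

# extra_info.json holds the absolute frame/matte roots written by write_extra_info.
extra = json.loads((out / "extra_info.json").read_text())
print("frames_root:", extra["frames_root"])
print("matte_root: ", extra["matte_root"])
```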
metadata/base/-2Dj9M71JAc.jpg ADDED

Git LFS Details

  • SHA256: fd66423860430745ad481f4c185cb2827333902c48834693fbe746ecd03717ba
  • Pointer size: 132 Bytes
  • Size of remote file: 5.13 MB
metadata/base/-E97Kgi0sR4.jpg ADDED

Git LFS Details

  • SHA256: 2f22bef432a5f9d1865240ccd3d17a5d81182c63a28f4b39a69889cc1e960227
  • Pointer size: 132 Bytes
  • Size of remote file: 2.88 MB
metadata/base/-FOCpMAww28.jpg ADDED

Git LFS Details

  • SHA256: 15061a65c177786176e24363ab353ed887218788276592da134a901b03bfd75b
  • Pointer size: 132 Bytes
  • Size of remote file: 4.1 MB
metadata/base/-H1tUMRJoeo-Scenes.jpg ADDED

Git LFS Details

  • SHA256: eb60ee1ff5dc58d6f5f1b699bed8ede699dc695a7f568dc8ce63ea819dad967f
  • Pointer size: 132 Bytes
  • Size of remote file: 3.72 MB
metadata/base/-H1tUMRJoeo.jpg ADDED

Git LFS Details

  • SHA256: 227c3583abecdc5048741d20166ce39b4097c21afc416d95373b435919a71916
  • Pointer size: 132 Bytes
  • Size of remote file: 3.29 MB
metadata/base/-I3e6Mkfp7M.jpg ADDED

Git LFS Details

  • SHA256: 1c92d9309698eec04d81758f8f00be06a7930dfa29f0771aadf6a97e8c466873
  • Pointer size: 132 Bytes
  • Size of remote file: 2.81 MB
metadata/base/-MTRxRO5SRA.jpg ADDED

Git LFS Details

  • SHA256: 78138d2da0501a8204980c903b43d33a280a8bb00de84630a922deed67a6f3ea
  • Pointer size: 132 Bytes
  • Size of remote file: 3.91 MB
metadata/base/-RkhAP0_ms4.jpg ADDED

Git LFS Details

  • SHA256: 8c188f93d3442a9b5532d9974df62775bd1d1dfe975439e24435432231bf938a
  • Pointer size: 132 Bytes
  • Size of remote file: 2.79 MB
metadata/base/-Z-ul0GzzM4.jpg ADDED

Git LFS Details

  • SHA256: c08d5a91f16f927a99e0cfaf4bd3b5ebc5c0502224d1691c07ceb9c2445f8d27
  • Pointer size: 132 Bytes
  • Size of remote file: 4.85 MB
metadata/base/-aouBn7IKIo.jpg ADDED

Git LFS Details

  • SHA256: 767bed24f3ca3018808194ef851f9dd07052a85ff957a0290408fb24819351c7
  • Pointer size: 132 Bytes
  • Size of remote file: 2.93 MB
metadata/base/-eBUcBfkVCo.jpg ADDED

Git LFS Details

  • SHA256: 5b0911532490219ce957839f846b335c558e9f75cc2af82e6fe4b36705f14565
  • Pointer size: 132 Bytes
  • Size of remote file: 3.12 MB
metadata/base/-hY9QSdaReY.jpg ADDED

Git LFS Details

  • SHA256: 307bca059df6cd594e90dc7f32a35e3ccbab22e388e97d71a7fae1de6552fc2b
  • Pointer size: 132 Bytes
  • Size of remote file: 3.58 MB
metadata/base/-mhe7COLiR0.jpg ADDED

Git LFS Details

  • SHA256: 202801dd3e36012b34b9296e29776ec75f8a040360f02fd69afe49962e07922c
  • Pointer size: 132 Bytes
  • Size of remote file: 3.87 MB
metadata/base/-moW9jvvMr4.jpg ADDED

Git LFS Details

  • SHA256: 56baca3197a599d9bd299128a2b94edac2c4e52e77d5c08e02a766295b98c7f6
  • Pointer size: 132 Bytes
  • Size of remote file: 3.42 MB
metadata/base/-nKdufEaL8k.jpg ADDED

Git LFS Details

  • SHA256: 55a7b2443f2e30ed4c62103573d60fdf1d9d2163dae4aef59b91758d40423a1f
  • Pointer size: 132 Bytes
  • Size of remote file: 2.39 MB
metadata/base/-vZXgApsPCQ.jpg ADDED

Git LFS Details

  • SHA256: ed4b2b8e011f57c5e8e6627b3f7f1471b3a35a4f04232bc796fdd937a29cd3fb
  • Pointer size: 132 Bytes
  • Size of remote file: 3.69 MB
metadata/base/-vqV-gHa2FE.jpg ADDED

Git LFS Details

  • SHA256: aa5f5a89e2b07d64ca355bd06f9ecef151ed9a2d90eb63d4086cf79537dd84ea
  • Pointer size: 132 Bytes
  • Size of remote file: 4.15 MB
metadata/base/0-FkPxSc_M4.jpg ADDED

Git LFS Details

  • SHA256: c268b919156de0816454d5b6280cf9d901c7fbb9a64d4bc3b0b1512a37900b89
  • Pointer size: 132 Bytes
  • Size of remote file: 1.56 MB
metadata/base/05jJodDVJRQ.jpg ADDED

Git LFS Details

  • SHA256: ad1db6506451d4c037fc64682eade53a6907b39ddc096f5f2f725c1328179195
  • Pointer size: 132 Bytes
  • Size of remote file: 3.99 MB
metadata/base/08ZWROqoTZo.jpg ADDED

Git LFS Details

  • SHA256: 664075625279891c1746820ffc8388a68fb7e71920b7d53f017826f50e7e48d8
  • Pointer size: 132 Bytes
  • Size of remote file: 5.99 MB
metadata/base/0DHywidLX6A.jpg ADDED

Git LFS Details

  • SHA256: 4f65dcc7d02323d19822edbd4107668980ab168cf9fa6e9ee67e3acb963e771c
  • Pointer size: 132 Bytes
  • Size of remote file: 4.13 MB
metadata/base/0FQXicAGy5U.jpg ADDED

Git LFS Details

  • SHA256: cdbce6ee7380bbe2584d322763e2232442de15b0784e1dcbfc2977aa3a01903d
  • Pointer size: 132 Bytes
  • Size of remote file: 6.04 MB
metadata/base/0Fi83BHQsMA.jpg ADDED

Git LFS Details

  • SHA256: e3f82b31edee8901f0cdc43c8413fecaff04f64294bd47caeecfb90bc6f5bd31
  • Pointer size: 132 Bytes
  • Size of remote file: 2.3 MB
metadata/base/0JGarsZE1rk.jpg ADDED

Git LFS Details

  • SHA256: 202f903d3d6f02ab547c8af8453547a8537d3cd700d2a11536489d94f5a04edc
  • Pointer size: 132 Bytes
  • Size of remote file: 2.69 MB
metadata/base/0K5OO2ybueM.jpg ADDED

Git LFS Details

  • SHA256: 73986ef8857e0ce7a83eb5fdf6575b6ffa73e4a63676fd37f004cdd5a7b1a324
  • Pointer size: 132 Bytes
  • Size of remote file: 2.05 MB
metadata/base/0MD4Ymjyc2I.jpg ADDED

Git LFS Details

  • SHA256: c8dd65174ef3cf5fc54481a4b2c72710a84b1837322c944ab0f85dc1baedef7b
  • Pointer size: 132 Bytes
  • Size of remote file: 3.77 MB
metadata/base/0NV1KdWRHck.jpg ADDED

Git LFS Details

  • SHA256: cea592df6122d74a825060df61071c383a037eddf26d0a753d4ec33b224200d0
  • Pointer size: 132 Bytes
  • Size of remote file: 4.22 MB
metadata/base/0PAy1zBtTbw.jpg ADDED

Git LFS Details

  • SHA256: 83a3360d700f74d82ecd824b3a152a7725085dae8a66005ffef577bf406d3216
  • Pointer size: 132 Bytes
  • Size of remote file: 3.06 MB
metadata/base/0R9zjn9BBvA.jpg ADDED

Git LFS Details

  • SHA256: 4c9b643cc6a64b2d5b9477ab51891ea83894e65f7ba03fa5b0474656ab70fd00
  • Pointer size: 132 Bytes
  • Size of remote file: 4.7 MB
metadata/base/0SkdP36wiAU.jpg ADDED

Git LFS Details

  • SHA256: a9fb492c6387dcb1ea767f97fe2ae17aaf9499bea758def7db0eec17e93babd2
  • Pointer size: 132 Bytes
  • Size of remote file: 3.17 MB
metadata/base/0bRocfcPhHU.jpg ADDED

Git LFS Details

  • SHA256: cb62ede064590216a28e1ae37309219f8d2b1bd40bfbecb1d0794ea2264af31a
  • Pointer size: 132 Bytes
  • Size of remote file: 3.43 MB
metadata/base/0d6iSvF1UmA.jpg ADDED

Git LFS Details

  • SHA256: d85b45df86108878095c15e5dd56d9fb6845efda19addeeea683c359e0ae7b94
  • Pointer size: 132 Bytes
  • Size of remote file: 6.2 MB
metadata/base/0g0S34XE2b8.jpg ADDED

Git LFS Details

  • SHA256: 94715705ffe209ef9bc4dd1c8d18a8be4427a1e0cbba94e5a0c9248485030d60
  • Pointer size: 132 Bytes
  • Size of remote file: 5.24 MB
metadata/base/0g2WE1qXiKM.jpg ADDED

Git LFS Details

  • SHA256: 75e29ad6a36397616f78e1f66e72473cf9a05bf44eac074f2e93eb531cba6a30
  • Pointer size: 132 Bytes
  • Size of remote file: 4.81 MB
metadata/base/0gMCZFHv9v8.jpg ADDED

Git LFS Details

  • SHA256: 3f6a49372e72b6fc5f31a50934e067395dae29346ccb36ee8419c9c759abf765
  • Pointer size: 132 Bytes
  • Size of remote file: 3.5 MB
metadata/base/0gks6ceq4eQ.jpg ADDED

Git LFS Details

  • SHA256: c62294313029b19f3c4d7c75d56daf2c216e8fce409db52afd4889e5dae70628
  • Pointer size: 132 Bytes
  • Size of remote file: 3.03 MB
metadata/base/0iIh5YYDR2o.jpg ADDED

Git LFS Details

  • SHA256: 5be10864c243846feff28739e6bb997517630ae8c3124d123766c0782747390d
  • Pointer size: 132 Bytes
  • Size of remote file: 3.67 MB
metadata/base/0txtVkBUdSQ.jpg ADDED

Git LFS Details

  • SHA256: add069ced4087c15072187c8e720e915534fb20d97874212ee1a3d6c6af29b45
  • Pointer size: 132 Bytes
  • Size of remote file: 4.12 MB
metadata/base/0ygtX2nyexo.jpg ADDED

Git LFS Details

  • SHA256: aeeefff878bc98499acc5825f127dc9adef8a0d24779a8280c2095ed7f254a61
  • Pointer size: 132 Bytes
  • Size of remote file: 2.96 MB
metadata/base/13rqtiAPISY.jpg ADDED

Git LFS Details

  • SHA256: edbc4fb66457d27409213466e7adfc835495529f027944f91bb67b9c3083d49a
  • Pointer size: 132 Bytes
  • Size of remote file: 4.35 MB
metadata/base/16cM-RFid9U.jpg ADDED

Git LFS Details

  • SHA256: 260716658901d415e384ff147c2046deca5f90e99ceec1cce545d86bd7940e52
  • Pointer size: 132 Bytes
  • Size of remote file: 4.32 MB
metadata/base/16p9YRF0l-g.jpg ADDED

Git LFS Details

  • SHA256: 99a999780067a8cca4066dcc445e8ba2138fdc61009810ff168be28c1bc518c8
  • Pointer size: 132 Bytes
  • Size of remote file: 3.41 MB
metadata/base/18zvlz5CxPE.jpg ADDED

Git LFS Details

  • SHA256: f10519d73cb9a7ec20205f5fdf538c91d341059dc31004eac69f309ab82508dc
  • Pointer size: 132 Bytes
  • Size of remote file: 1.25 MB
metadata/base/1AT5klu_yAQ.jpg ADDED

Git LFS Details

  • SHA256: 61cbfcd0e8615e3da7a792022a7b6f5fdb2449275b31e4218bb6736425616630
  • Pointer size: 132 Bytes
  • Size of remote file: 3.5 MB