Dan Bochman committed on
Commit
e727ae9
·
0 Parent(s):

Initial commit: FASHN VTON v1.5 HuggingFace Space

Browse files

Gradio demo for the open-source virtual try-on model featuring:
- Dual image input (person + garment) with category selection
- Example images for quick testing
- Direct integration with fashn-ai/fashn-vton-1.5 weights
- Discoverability tags for HuggingFace search

.gitattributes ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.webp filter=lfs diff=lfs merge=lfs -text
37
+ *.png filter=lfs diff=lfs merge=lfs -text
38
+ *.jpg filter=lfs diff=lfs merge=lfs -text
39
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Virtual environment
2
+ .venv/
3
+ venv/
4
+ env/
5
+
6
+ # Model weights (downloaded at runtime)
7
+ weights/
8
+
9
+ # Python
10
+ __pycache__/
11
+ *.py[cod]
12
+ *$py.class
13
+ *.so
14
+
15
+ # IDE
16
+ .idea/
17
+ .vscode/
18
+ *.swp
19
+ *.swo
20
+
21
+ # OS
22
+ .DS_Store
23
+ Thumbs.db
README.md ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: FASHN VTON v1.5
3
+ emoji: "\U0001F457"
4
+ colorFrom: purple
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 6.3.0
8
+ python_version: 3.10.14
9
+ app_file: app.py
10
+ pinned: false
11
+ license: apache-2.0
12
+ short_description: Maskless try-on directly in pixel space
13
+ tags:
14
+ - virtual-try-on
15
+ - diffusion
16
+ - fashion
17
+ - image-to-image
18
+ ---
app.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""FASHN VTON v1.5 HuggingFace Space Demo."""

import os

import gradio as gr
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

# Handle the `spaces.GPU` decorator for local vs HuggingFace execution:
# on ZeroGPU Spaces the `spaces` package exists and the decorator attaches
# a GPU to the wrapped call; locally we fall back to a no-op pass-through.
try:
    import spaces

    GPU_DECORATOR = spaces.GPU
except ImportError:
    # PEP 8 (E731): use a def, not a lambda bound to a name, so tracebacks
    # and repr show a meaningful function instead of `<lambda>`.
    def GPU_DECORATOR(func):  # noqa: N802 - name mirrors the decorator constant above
        """No-op replacement for `spaces.GPU` when running outside Spaces."""
        return func


# ----------------- CONFIG ----------------- #

# Resolve all paths relative to this file so the app works from any CWD.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ASSETS_DIR = os.path.join(SCRIPT_DIR, "assets")
WEIGHTS_DIR = os.path.join(SCRIPT_DIR, "weights")

# Garment categories and photo types accepted by the try-on pipeline.
CATEGORIES = ["tops", "bottoms", "one-pieces"]
GARMENT_PHOTO_TYPES = ["model", "flat-lay"]

# Global pipeline instance (lazy loaded on first inference request)
_pipeline = None
30
+
31
+ # ----------------- HELPERS ----------------- #
32
+
33
+
def download_weights():
    """Download model weights from the HuggingFace Hub (idempotent).

    Fetches the TryOnModel safetensors file and the two DWPose ONNX models
    into ``WEIGHTS_DIR``, skipping any file that already exists on disk so
    repeated startups do not re-download.
    """
    os.makedirs(WEIGHTS_DIR, exist_ok=True)
    dwpose_dir = os.path.join(WEIGHTS_DIR, "dwpose")
    os.makedirs(dwpose_dir, exist_ok=True)

    # Download TryOnModel weights
    tryon_path = os.path.join(WEIGHTS_DIR, "model.safetensors")
    if not os.path.exists(tryon_path):
        print("Downloading TryOnModel weights...")
        hf_hub_download(
            repo_id="fashn-ai/fashn-vton-1.5",
            filename="model.safetensors",
            local_dir=WEIGHTS_DIR,
        )

    # Download DWPose models (person detector + pose estimator)
    dwpose_files = ["yolox_l.onnx", "dw-ll_ucoco_384.onnx"]
    for filename in dwpose_files:
        filepath = os.path.join(dwpose_dir, filename)
        if not os.path.exists(filepath):
            # Fix: the progress message was an f-string with no placeholder;
            # interpolate the actual file being fetched.
            print(f"Downloading DWPose/{filename}...")
            hf_hub_download(
                repo_id="fashn-ai/DWPose",
                filename=filename,
                local_dir=dwpose_dir,
            )

    print("Weights downloaded successfully!")
63
+
64
+
65
+ # ----------------- MODEL LOADING ----------------- #
66
+
67
+
def get_pipeline():
    """Return the global TryOnPipeline, constructing it on first call.

    Lazy construction matters on ZeroGPU Spaces: CUDA is only attached
    inside the ``@spaces.GPU`` request context, so the model must not be
    built at import time.
    """
    global _pipeline

    # Fast path: already constructed on a previous request.
    if _pipeline is not None:
        return _pipeline

    # Surface a user-visible error instead of crashing deep in model code
    # when no GPU is attached.
    if not torch.cuda.is_available():
        raise gr.Error(
            "CUDA is not available. This demo requires a GPU to run. "
            "If you're on HuggingFace Spaces, please try again in a moment."
        )

    # TF32 speeds up matmul/conv on Ampere (SM 8.x) and newer GPUs.
    if torch.cuda.get_device_properties(0).major >= 8:
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

    print("Downloading weights (if needed)...")
    download_weights()

    print("Loading pipeline...")
    from fashn_vton import TryOnPipeline

    _pipeline = TryOnPipeline(weights_dir=WEIGHTS_DIR, device="cuda")
    print("Pipeline loaded on CUDA!")

    return _pipeline
94
+
95
+
96
+ # ----------------- INFERENCE ----------------- #
97
+
98
+
@GPU_DECORATOR
def try_on(
    person_image: Image.Image,
    garment_image: Image.Image,
    category: str,
    garment_photo_type: str,
    num_timesteps: int,
    guidance_scale: float,
    seed: int,
    segmentation_free: bool,
) -> Image.Image:
    """Run virtual try-on inference and return the generated image."""
    # Both images are required; fail early with a user-facing message.
    if person_image is None:
        raise gr.Error("Please upload a person image")
    if garment_image is None:
        raise gr.Error("Please upload a garment image")

    # Fall back to a fixed seed when the field is empty or negative.
    seed = 42 if seed is None or seed < 0 else seed

    # The pipeline expects 3-channel RGB inputs.
    if person_image.mode != "RGB":
        person_image = person_image.convert("RGB")
    if garment_image.mode != "RGB":
        garment_image = garment_image.convert("RGB")

    # Lazy-loads the model on the first request (see get_pipeline).
    model = get_pipeline()

    output = model(
        person_image=person_image,
        garment_image=garment_image,
        category=category,
        garment_photo_type=garment_photo_type,
        num_samples=1,
        num_timesteps=num_timesteps,
        guidance_scale=guidance_scale,
        seed=int(seed),
        segmentation_free=segmentation_free,
    )

    return output.images[0]
143
+
144
+
# ----------------- UI ----------------- #

# Custom CSS: letter-box images inside a fixed bounding box.
CUSTOM_CSS = """
.contain img {
    object-fit: contain !important;
    max-height: 856px !important;
    max-width: 576px !important;
}
"""


def _read_html(name):
    """Read a small HTML fragment that lives next to this script."""
    with open(os.path.join(SCRIPT_DIR, name), "r") as f:
        return f.read()


banner_html = _read_html("banner.html")
tips_html = _read_html("tips.html")

# Example assets live under assets/examples.
examples_dir = os.path.join(ASSETS_DIR, "examples")


def _pair(person_name, garment_name, category_value, photo_type):
    """Build one paired-example row: [person, garment, category, photo type]."""
    return [
        os.path.join(examples_dir, person_name),
        os.path.join(examples_dir, garment_name),
        category_value,
        photo_type,
    ]


# Paired examples: [person_path, garment_path, category, garment_photo_type]
paired_examples = [
    _pair("person1.png", "garment1.jpeg", "one-pieces", "model"),
    _pair("person2.png", "garment2.webp", "tops", "model"),
    _pair("person3.png", "garment3.jpeg", "tops", "flat-lay"),
    _pair("person4.png", "garment4.webp", "tops", "model"),
    _pair("person5.png", "garment5.jpeg", "bottoms", "flat-lay"),
    _pair("person6.png", "garment6.webp", "one-pieces", "model"),
]

# Individual examples (classic from repo)
person_only_examples = [os.path.join(examples_dir, "person0.png")]
garment_only_examples = [os.path.join(examples_dir, "garment0.png")]

# Build UI
with gr.Blocks(css=CUSTOM_CSS) as demo:
    # Header banner and usage tips.
    gr.HTML(banner_html)
    gr.HTML(tips_html)

    with gr.Row(equal_height=False):
        # Left column: all inputs.
        with gr.Column(scale=1):
            person_image = gr.Image(
                label="Person Image",
                type="pil",
                sources=["upload", "clipboard"],
                elem_classes=["contain"],
            )

            # Individual person examples
            gr.Examples(
                examples=person_only_examples,
                inputs=person_image,
                label="Person Examples",
            )

            garment_image = gr.Image(
                label="Garment Image",
                type="pil",
                sources=["upload", "clipboard"],
                elem_classes=["contain"],
            )

            # Individual garment examples
            gr.Examples(
                examples=garment_only_examples,
                inputs=garment_image,
                label="Garment Examples",
            )

            with gr.Row():
                category = gr.Dropdown(
                    choices=CATEGORIES,
                    value="tops",
                    label="Garment Category",
                )
                garment_photo_type = gr.Dropdown(
                    choices=GARMENT_PHOTO_TYPES,
                    value="model",
                    label="Garment Photo Type",
                )

            run_button = gr.Button("Try On", variant="primary", size="lg")

            # Advanced settings, collapsed by default.
            with gr.Accordion("Advanced Settings", open=False):
                num_timesteps = gr.Slider(
                    minimum=10,
                    maximum=50,
                    value=50,
                    step=5,
                    label="Sampling Steps",
                    info="Higher = better quality, slower.",
                )
                guidance_scale = gr.Slider(
                    minimum=1.0,
                    maximum=3.0,
                    value=1.5,
                    step=0.1,
                    label="Guidance Scale",
                    info="How closely to follow the garment. 1.5 recommended.",
                )
                seed = gr.Number(
                    value=42,
                    label="Seed",
                    info="Random seed for reproducibility.",
                    precision=0,
                )
                segmentation_free = gr.Checkbox(
                    value=True,
                    label="Segmentation Free",
                    info="Preserves body features and allows unconstrained garment volume. Disable for tighter garment fitting.",
                )

        # Right column: generated output.
        with gr.Column(scale=1):
            result_image = gr.Image(
                label="Try-On Result",
                type="pil",
                interactive=False,
                elem_classes=["contain"],
            )

    # Paired examples at the bottom
    gr.Examples(
        examples=paired_examples,
        inputs=[person_image, garment_image, category, garment_photo_type],
        label="Complete Examples (click to load person + garment + settings)",
    )

    # Wire the button to inference.
    run_button.click(
        fn=try_on,
        inputs=[
            person_image,
            garment_image,
            category,
            garment_photo_type,
            num_timesteps,
            guidance_scale,
            seed,
            segmentation_free,
        ],
        outputs=[result_image],
    )

# Configure queue with concurrency limit to prevent GPU OOM
demo.queue(default_concurrency_limit=1, max_size=30)

if __name__ == "__main__":
    demo.launch(share=False)
assets/examples/garment0.png ADDED

Git LFS Details

  • SHA256: c3ff691d60a92f57887c11052a41e86e63d7927dc1f85e37b30afd7c49481818
  • Pointer size: 131 Bytes
  • Size of remote file: 359 kB
assets/examples/garment1.jpeg ADDED

Git LFS Details

  • SHA256: 8681fc59b1a7204144d9724840ed52b2a6174611a49abb20b57a3552d7d3cc84
  • Pointer size: 131 Bytes
  • Size of remote file: 315 kB
assets/examples/garment2.webp ADDED

Git LFS Details

  • SHA256: 58831c35007da83b5eafa3cee728c53e5fddf8fc49e905f41c28e076210a210e
  • Pointer size: 131 Bytes
  • Size of remote file: 815 kB
assets/examples/garment3.jpeg ADDED

Git LFS Details

  • SHA256: 182917cd3a6380ffb7994bdac0e7cce8d197af4b2ae344d98657e85dcffff96d
  • Pointer size: 131 Bytes
  • Size of remote file: 281 kB
assets/examples/garment4.webp ADDED

Git LFS Details

  • SHA256: d5cd34018de2fc436257d0d95f222243c62da25db805d5ae1bdcf277b6388fea
  • Pointer size: 130 Bytes
  • Size of remote file: 45.9 kB
assets/examples/garment5.jpeg ADDED

Git LFS Details

  • SHA256: fd6eea00b8145961338a3e43845cfb63bdba3a4dc1653edc5eba14bcd8148a35
  • Pointer size: 131 Bytes
  • Size of remote file: 175 kB
assets/examples/garment6.webp ADDED

Git LFS Details

  • SHA256: b42ebd9e4fe90f8911695cdfe7068d4cc29b77886923e09b6b986c5d4f30fae4
  • Pointer size: 130 Bytes
  • Size of remote file: 89.1 kB
assets/examples/person0.png ADDED

Git LFS Details

  • SHA256: e70cd0491254708e52228b6e6b9c831a85211374fccc439583e591d3c5f7dcee
  • Pointer size: 132 Bytes
  • Size of remote file: 1.24 MB
assets/examples/person1.png ADDED

Git LFS Details

  • SHA256: 9c425d154d7bea584b4060e7e1ed28cd676e3f9ea3c475b922e986d04a48d273
  • Pointer size: 132 Bytes
  • Size of remote file: 1.31 MB
assets/examples/person2.png ADDED

Git LFS Details

  • SHA256: 0fbd68ea4972e9bfaf582ef25c4d8c466582c2dce66bca021d4f16ebe35c7471
  • Pointer size: 132 Bytes
  • Size of remote file: 1.8 MB
assets/examples/person3.png ADDED

Git LFS Details

  • SHA256: befcbc71f0bdd331d1148974f2db5377fa5b8d90969d6153a5d27cb1a135f418
  • Pointer size: 132 Bytes
  • Size of remote file: 2.03 MB
assets/examples/person4.png ADDED

Git LFS Details

  • SHA256: a1381873afecc50693e9fa439b9f16ec9f06118385696a8d06d1db0017240e91
  • Pointer size: 132 Bytes
  • Size of remote file: 1.52 MB
assets/examples/person5.png ADDED

Git LFS Details

  • SHA256: d4b58b27a752a07600f477faf187263bca846e855761c41079d546dc646ab2d7
  • Pointer size: 132 Bytes
  • Size of remote file: 1.18 MB
assets/examples/person6.png ADDED

Git LFS Details

  • SHA256: d1373eda537929ef55808354459ad1bc929fadfed31c5e85671a89ffd8f18965
  • Pointer size: 132 Bytes
  • Size of remote file: 2.2 MB
banner.html ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div style="
2
+ display: flex;
3
+ flex-direction: column;
4
+ justify-content: center;
5
+ align-items: center;
6
+ text-align: center;
7
+ background: linear-gradient(45deg, #1a1a1a 0%, #333333 100%);
8
+ padding: 24px;
9
+ gap: 24px;
10
+ border-radius: 8px;
11
+ ">
12
+ <div style="display: flex; gap: 8px;">
13
+ <h1 style="
14
+ font-size: 42px;
15
+ color: #fafafa;
16
+ margin: 0;
17
+ font-family: 'Trebuchet MS', 'Lucida Sans Unicode', 'Lucida Grande',
18
+ 'Lucida Sans', Arial, sans-serif;
19
+ ">
20
+ FASHN VTON v1.5
21
+ </h1>
22
+ </div>
23
+
24
+ <p style="
25
+ margin: 0;
26
+ line-height: 1.6rem;
27
+ font-size: 18px;
28
+ color: #fafafa;
29
+ opacity: 0.9;
30
+ ">
31
+ Efficient Maskless Virtual Try-On in Pixel Space
32
+ </p>
33
+
34
+ <p style="
35
+ margin: 0;
36
+ line-height: 1.5rem;
37
+ font-size: 14px;
38
+ color: #fafafa;
39
+ opacity: 0.7;
40
+ ">
41
+ Virtual try-on model that generates photorealistic images directly in pixel space without requiring segmentation masks.
42
+ </p>
43
+
44
+ <div style="
45
+ display: flex;
46
+ justify-content: center;
47
+ align-items: center;
48
+ text-align: center;
49
+ gap: 4px;
50
+ flex-wrap: wrap;
51
+ ">
52
+ <a href="https://fashn.ai"><img src="https://custom-icon-badges.demolab.com/badge/FASHN_AI-333333?style=for-the-badge&logo=fashn" alt="FASHN AI" /></a>
53
+ <a href="https://fashn.ai/research/vton-1-5"><img src="https://img.shields.io/badge/Project-Page-1A1A1A?style=for-the-badge" alt="Project Page" /></a>
54
+ <a href="https://github.com/fashn-AI/fashn-vton-1.5"><img src="https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white" alt="Github" /></a>
55
+ <a href="https://huggingface.co/fashn-ai/fashn-vton-1.5"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Model-FFD21E?style=for-the-badge" alt="HuggingFace Model" /></a>
56
+ <a href="#"><img src="https://img.shields.io/badge/arXiv-Coming%20Soon-b31b1b?style=for-the-badge&logo=arXiv&logoColor=white" alt="arXiv" /></a>
57
+ <a href="https://discord.gg/4XRXgJ2ysU"><img src="https://img.shields.io/badge/Discord-%235865F2.svg?style=for-the-badge&logo=discord&logoColor=white" alt="Discord" /></a>
58
+ </div>
59
+ </div>
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cu121
2
+ torch
3
+ torchvision
4
+ gradio==6.3.0
5
+ fashn-vton @ git+https://github.com/fashn-AI/fashn-vton-1.5.git@38aafe2185df40a3e8a5442e950c422c3d9dcb5a
6
+ onnxruntime-gpu>=1.14.0
7
+ spaces
8
+ pillow
9
+ numpy
10
+ huggingface_hub
tips.html ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div style="
2
+ padding: 12px;
3
+ border: 1px solid #333333;
4
+ border-radius: 8px;
5
+ text-align: center;
6
+ display: flex;
7
+ flex-direction: column;
8
+ gap: 8px;
9
+ ">
10
+ <b style="font-size: 18px;">Tips for best results</b>
11
+
12
+ <ul style="
13
+ display: flex;
14
+ gap: 16px;
15
+ justify-content: center;
16
+ list-style: none;
17
+ padding: 0;
18
+ margin: 0;
19
+ flex-wrap: wrap;
20
+ ">
21
+ <li>Single person, clearly visible</li>
22
+ <li>|</li>
23
+ <li>Match category to garment type</li>
24
+ <li>|</li>
25
+ <li>Use "flat-lay" for product shots</li>
26
+ <li>|</li>
27
+ <li>2:3 aspect ratio optimal</li>
28
+ </ul>
29
+ </div>