Spaces: Running on Zero
Commit: Upload 9 files

Files changed:
- README.md +5 -7
- app.py +613 -230
- optimization.py +107 -11
- optimization_utils.py +87 -17
- requirements.txt +12 -5
README.md CHANGED
@@ -1,14 +1,12 @@
 ---
-title: …
+title: Wan 2 2 First Last Frame
-emoji: …
+emoji: 💻
-colorFrom: …
+colorFrom: purple
 colorTo: gray
 sdk: gradio
-sdk_version: …
+sdk_version: 5.29.1
 app_file: app.py
-pinned: …
+pinned: false
-license: mit
-short_description: 'Kontext image editing on FLUX[dev] '
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,8 +1,8 @@
@@ -12,292 +12,675 @@
[Old version: the previous FLUX.1 Kontext image-editing app — its `infer` function, docstring, pipe() calls, and single-image Gradio UI — survives only as scattered fragments in this view. The rewritten file is shown in full below.]
import os
# PyTorch 2.8 (temporary hack)
os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')

# --- 1. Model Download and Setup (Diffusers Backend) ---
try:
    import spaces
except:
    # … (fallback stub, lines 9-11 unchanged and not shown in this view)
                return lambda *dummy_args, **dummy_kwargs: function(*dummy_args, **dummy_kwargs)
            return decorator

import torch
from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import imageio_ffmpeg
import tempfile
import shutil
import subprocess
import time
from datetime import datetime
import numpy as np
from PIL import Image
import random
import math
import traceback
import gc
from gradio_client import Client, handle_file  # Import for API call
import zipfile

# Import optimization and access compiled artifacts
import optimization

# Import the optimization function from the separate file
from optimization import optimize_pipeline_

# --- Constants and Model Loading ---
MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"

# --- NEW: Flexible Dimension Constants ---
MAX_DIMENSION = 832
MIN_DIMENSION = 480
DIMENSION_MULTIPLE = 16
SQUARE_SIZE = 480

MAX_SEED = np.iinfo(np.int32).max

FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81

MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)

input_image_debug_value = [None]
end_image_debug_value = [None]
prompt_debug_value = [None]
total_second_length_debug_value = [None]
resolution_debug_value = [None]
factor_debug_value = [None]
allocation_time_debug_value = [None]

default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "

transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
    subfolder='transformer',
    torch_dtype=torch.bfloat16,
    device_map='cuda',
)

transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
    subfolder='transformer_2',
    torch_dtype=torch.bfloat16,
    device_map='cuda',
)

pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID,
    transformer=transformer,
    transformer_2=transformer_2,
    torch_dtype=torch.bfloat16,
)
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
pipe.to('cuda')

for i in range(3):
    gc.collect()
    torch.cuda.synchronize()
    torch.cuda.empty_cache()

optimize_pipeline_(pipe,
    image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
    prompt='prompt',
    height=MIN_DIMENSION,
    width=MAX_DIMENSION,
    num_frames=MAX_FRAMES_MODEL,
)

def _escape_html(s: str) -> str:
    return (s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;"))

def _error_to_html(err: BaseException) -> str:
    tb = traceback.format_exc()
    return (
        "<div style='padding:12px;border:1px solid #ff4d4f;background:#fff1f0;color:#a8071a;border-radius:8px;'>"
        "<b>Generation failed</b><br/>"
        f"<b>{_escape_html(type(err).__name__)}</b>: {_escape_html(str(err))}"
        "<details style='margin-top:8px;'>"
        "<summary>Show traceback</summary>"
        f"<pre style='white-space:pre-wrap;margin-top:8px;'>{_escape_html(tb)}</pre>"
        "</details>"
        "</div>"
    )

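# Editor's sketch (not part of the original commit): a quick sanity check of
# the escaping order above — "&" must be replaced first, or the entities
# introduced by the later replacements would themselves be re-escaped.
assert _escape_html('<b>&</b>') == '&lt;b&gt;&amp;&lt;/b&gt;'
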
# 20250508 pftq: for saving prompt to mp4 metadata comments
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
    try:
        # Get the path to the bundled FFmpeg binary from imageio-ffmpeg
        ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()

        # Check if input file exists
        if not os.path.exists(input_file):
            #print(f"Error: Input file {input_file} does not exist")
            return False

        # Create a temporary file path
        temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name

        # FFmpeg command using the bundled binary
        command = [
            ffmpeg_path,                         # Use imageio-ffmpeg's FFmpeg
            '-i', input_file,                    # input file
            '-metadata', f'comment={comments}',  # set comment metadata
            '-c:v', 'copy',                      # copy video stream without re-encoding
            '-c:a', 'copy',                      # copy audio stream without re-encoding
            '-y',                                # overwrite output file if it exists
            temp_file                            # temporary output file
        ]

        # Run the FFmpeg command
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

        if result.returncode == 0:
            # Replace the original file with the modified one
            shutil.move(temp_file, input_file)
            #print(f"Successfully added comments to {input_file}")
            return True
        else:
            # Clean up temp file if FFmpeg fails
            if os.path.exists(temp_file):
                os.remove(temp_file)
            #print(f"Error: FFmpeg failed with message:\n{result.stderr}")
            return False

    except Exception as e:
        # Clean up temp file in case of other errors
        if 'temp_file' in locals() and os.path.exists(temp_file):
            os.remove(temp_file)
        print(f"Error saving prompt to video metadata, ffmpeg may be required: " + str(e))
        return False

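# Editor's sketch (not part of the original commit): the comment tag written
# above can be read back with the same bundled ffmpeg binary, which prints
# container metadata on stderr ('some_clip.mp4' is a hypothetical file):
#
#     info = subprocess.run(
#         [imageio_ffmpeg.get_ffmpeg_exe(), '-i', 'some_clip.mp4'],
#         stderr=subprocess.PIPE, text=True,
#     ).stderr
#     # look for a line such as:  comment         : Prompt: ...
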
# --- 2. Image Processing and Application Logic ---
def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
    """Calls an external Gradio API to generate an image."""
    if start_img is None:
        raise gr.Error("Please provide a Start Frame first.")

    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")

    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
        start_img.save(tmpfile.name)
        tmp_path = tmpfile.name

    progress(0.1, desc="Connecting to image generation API...")
    client = Client("multimodalart/nano-banana-private")

    progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
    try:
        result = client.predict(
            prompt=gen_prompt,
            images=[
                {"image": handle_file(tmp_path)}
            ],
            manual_token=hf_token,
            api_name="/unified_image_generator"
        )
    finally:
        os.remove(tmp_path)

    progress(1.0, desc="Done!")
    print(result)
    return result

def switch_to_upload_tab():
    """Returns a gr.Tabs update to switch to the first tab."""
    return gr.Tabs(selected="upload_tab")


def process_image_for_video(image: Image.Image, resolution: int) -> Image.Image:
    """
    Resizes an image based on the following rules for video generation.
    """
    width, height = image.size

    if resolution < width * height:
        scale = ((width * height) / resolution)**(.5)
        new_width = width / scale
        new_height = height / scale
        final_width = int(math.floor(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
        final_height = int(math.floor(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)

    elif width * height < (MIN_DIMENSION**2):
        scale = ((MIN_DIMENSION**2) / (width * height))**(.5)
        new_width = width * scale
        new_height = height * scale
        final_width = int(math.ceil(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
        final_height = int(math.ceil(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)

    else:
        final_width = int(round(width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
        final_height = int(round(height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)

    return image.resize((final_width, final_height), Image.Resampling.LANCZOS)

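# Editor's sketch (not part of the original commit): a worked example of the
# downscaling branch above. A 1920x1080 input with resolution=465920 gives
# scale = (1920*1080/465920)**0.5 ≈ 2.11, i.e. ≈ 910x512 before flooring to
# multiples of DIMENSION_MULTIPLE -> 896x496 (444,416 px, within budget).
_demo = process_image_for_video(Image.new('RGB', (1920, 1080)), 465920)
assert _demo.size == (896, 496)
assert _demo.width * _demo.height <= 465920
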
def resize_and_crop_to_match(target_image, reference_image):
    """Resizes the target image to match the reference image's dimensions."""
    ref_width, ref_height = reference_image.size
    return target_image.resize((ref_width, ref_height), Image.Resampling.LANCZOS)

def crop_to_match(target_image, reference_image):
    """Resizes and center-crops the target image to match the reference image's dimensions."""
    ref_width, ref_height = reference_image.size
    target_width, target_height = target_image.size
    scale = max(ref_width / target_width, ref_height / target_height)
    new_width, new_height = int(target_width * scale), int(target_height * scale)
    resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
    return resized.crop((left, top, left + ref_width, top + ref_height))

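# Editor's note (not part of the original commit): despite its name,
# resize_and_crop_to_match() only resizes, so it can distort the aspect ratio;
# crop_to_match() preserves aspect by scaling up and center-cropping instead.
# Both produce the reference dimensions:
_ref = Image.new('RGB', (640, 480))
assert resize_and_crop_to_match(Image.new('RGB', (1000, 200)), _ref).size == (640, 480)
assert crop_to_match(Image.new('RGB', (1000, 200)), _ref).size == (640, 480)
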
def init_view():
    return gr.update(interactive = True)

def output_video_change(output_video):
    print('Log output: ' + str(output_video))
    return [gr.update(visible = True)] * 2

def generate_video(
    start_image_pil,
    end_image_pil,
    prompt,
    negative_prompt=default_negative_prompt,
    resolution=500000,
    duration_seconds=2.1,
    steps=8,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=True,
    progress=gr.Progress(track_tqdm=True)
):
    start = time.time()
    allocation_time = 120
    factor = 1

    if input_image_debug_value[0] is not None or end_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None or allocation_time_debug_value[0] is not None or resolution_debug_value[0] is not None or factor_debug_value[0] is not None:
        start_image_pil = input_image_debug_value[0]
        end_image_pil = end_image_debug_value[0]
        prompt = prompt_debug_value[0]
        duration_seconds = total_second_length_debug_value[0]
        resolution = resolution_debug_value[0]
        factor = factor_debug_value[0]
        allocation_time = allocation_time_debug_value[0]

    if start_image_pil is None or end_image_pil is None:
        raise gr.Error("Please upload both a start and an end image.")

    # Step 1: Process the start image to get our target dimensions based on the new rules.
    processed_start_image = process_image_for_video(start_image_pil, resolution)

    # Step 2: Make the end image match the *exact* dimensions of the processed start image.
    processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)

    target_height, target_width = processed_start_image.height, processed_start_image.width

    # Handle seed and frame count
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    random.seed(current_seed)
    torch.manual_seed(current_seed)
    num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)

    progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")

    progress(0.1, desc="Preprocessing images...")
    print("Generate a video with the prompt: " + prompt)
    output_frames_list = None
    caught_error = None
    frames_requested = num_frames
    while factor >= 1 and int(allocation_time) > 0:
        try:
            # The model expects 4k + 1 frames; the request shrinks on each retry.
            frames_requested = (int(((num_frames * factor) - 1) / 4) * 4) + 1
            output_frames_list = generate_video_on_gpu(
                start_image_pil,
                end_image_pil,
                prompt,
                negative_prompt,
                int(steps),
                float(guidance_scale),
                float(guidance_scale_2),
                progress,
                allocation_time,
                target_height,
                target_width,
                current_seed,
                frames_requested,
                processed_start_image,
                processed_end_image
            )
            factor = 0
            caught_error = None
        except BaseException as err:
            print("An exception occurred: " + str(err))
            caught_error = err
            try:
                print('e.message: ' + err.message)  # No GPU is currently available for you after 60s
            except Exception as e2:
                print('Failure')
            if not str(err).startswith("No GPU is currently available for you after 60s"):
                factor -= .003
            allocation_time = int(allocation_time) - 1
        except:
            # Defensive fallback (unreachable in practice: the BaseException
            # clause above already catches everything).
            print("An error occurred")
            caught_error = None
            allocation_time = int(allocation_time) - 1

    if caught_error is not None:
        return [gr.skip(), gr.skip(), gr.skip(), gr.update(value=_error_to_html(caught_error), visible=True), gr.skip()]

    input_image_debug_value[0] = end_image_debug_value[0] = prompt_debug_value[0] = total_second_length_debug_value[0] = allocation_time_debug_value[0] = factor_debug_value[0] = None

    progress(0.9, desc="Encoding and saving video...")

    video_path = 'wan_' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.mp4'

    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    set_mp4_comments_imageio_ffmpeg(video_path, f"Prompt: {prompt} | Negative Prompt: {negative_prompt}")
    print("Video exported: " + video_path)

    progress(1.0, desc="Done!")
    end = time.time()
    secondes = int(end - start)
    minutes = math.floor(secondes / 60)
    secondes = secondes - (minutes * 60)
    hours = math.floor(minutes / 60)
    minutes = minutes - (hours * 60)
    information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
        "The video has been generated in " + \
        ((str(hours) + " h, ") if hours != 0 else "") + \
        ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
        str(secondes) + " sec (including " + str(allocation_time) + " seconds of GPU). " + \
        "The video has " + str(frames_requested) + " frames. " + \
        "The video is " + str(target_width) + \
        " pixels wide and " + str(target_height) + \
        " pixels high, so a resolution of " + f'{target_width * target_height:,}' + " pixels." + \
        " Your prompt is saved into the metadata of the video."
    return [video_path, gr.update(value = video_path, visible = True, interactive = True), current_seed, gr.update(value = information, visible = True), gr.update(interactive = False)]

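# Editor's note (not part of the original commit): generate_video returns one
# value per component in ui_outputs defined further down:
#     [output_video, download_button, seed_input, video_information, dummy_button]
# On failure it returns gr.skip() everywhere except video_information, which
# receives the HTML error card built by _error_to_html().
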
def get_duration(
    start_image_pil,
    end_image_pil,
    prompt,
    negative_prompt,
    steps,
    guidance_scale,
    guidance_scale_2,
    progress,
    allocation_time,
    target_height,
    target_width,
    current_seed,
    num_frames,
    processed_start_image,
    processed_end_image
):
    return allocation_time

@torch.no_grad()
@spaces.GPU(duration=get_duration)
def generate_video_on_gpu(
    start_image_pil,
    end_image_pil,
    prompt,
    negative_prompt,
    steps,
    guidance_scale,
    guidance_scale_2,
    progress,
    allocation_time,
    target_height,
    target_width,
    current_seed,
    num_frames,
    processed_start_image,
    processed_end_image
):
    """
    Generates a video by interpolating between a start and end image, guided by a text prompt,
    using the diffusers Wan2.2 pipeline.
    """

    output_frames_list = pipe(
        image=processed_start_image,
        last_image=processed_end_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=target_height,
        width=target_width,
        num_frames=num_frames,
        guidance_scale=guidance_scale,
        guidance_scale_2=guidance_scale_2,
        num_inference_steps=steps,
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

    return output_frames_list

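# Editor's note (not part of the original commit): spaces.GPU also accepts a
# callable for `duration`; it is invoked with the same arguments as the
# decorated function, so get_duration() above lets each request reserve exactly
# `allocation_time` seconds of ZeroGPU time instead of a fixed quota.
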
def export_compiled_transformers_to_zip() -> str:
    """
    Bundle compiled_transformer_1 and compiled_transformer_2 into a zip file and return the file path.
    """
    ct1 = getattr(optimization, "COMPILED_TRANSFORMER_1", None)
    ct2 = getattr(optimization, "COMPILED_TRANSFORMER_2", None)

    if ct1 is None or ct2 is None:
        raise gr.Error("Compiled transformers are not available yet (compilation may have failed).")

    payload_1 = ct1.to_serializable_dict()
    payload_2 = ct2.to_serializable_dict()

    tmp_zip = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
    tmp_zip.close()

    with zipfile.ZipFile(tmp_zip.name, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        # store with torch.save so users can load easily with torch.load()
        buf1 = tempfile.NamedTemporaryFile(suffix=".pt", delete=False)
        buf1.close()
        torch.save(payload_1, buf1.name)

        buf2 = tempfile.NamedTemporaryFile(suffix=".pt", delete=False)
        buf2.close()
        torch.save(payload_2, buf2.name)

        zf.write(buf1.name, arcname="compiled_transformer_1.pt")
        zf.write(buf2.name, arcname="compiled_transformer_2.pt")

    # cleanup intermediate .pt
    try:
        os.remove(buf1.name)
        os.remove(buf2.name)
    except:
        pass

    return tmp_zip.name

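# Editor's sketch (not part of the original commit): consuming the exported
# archive offline; assumes the "zerogpu_aoti_v1" payload format defined in
# optimization_utils.py.
#
#     import zipfile, torch
#     with zipfile.ZipFile("compiled_transformers.zip") as zf:
#         zf.extract("compiled_transformer_1.pt")
#     payload = torch.load("compiled_transformer_1.pt",
#                          map_location="cpu", weights_only=False)
#     assert payload["format"] == "zerogpu_aoti_v1"
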
# --- 3. Gradio User Interface ---


js = """
function createGradioAnimation() {
    window.addEventListener("beforeunload", function(e) {
        if (document.getElementById('dummy_button_id') && !document.getElementById('dummy_button_id').disabled) {
            var confirmationMessage = 'A process is still running. '
                + 'If you leave before saving, your changes will be lost.';

            (e || window.event).returnValue = confirmationMessage;
        }
        return confirmationMessage;
    });
    return 'Animation created';
}
"""

css="""
#default_examples {
    display:none;
}
"""

# Gradio interface
with gr.Blocks(js=js, css=css) as app:
    gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
    gr.Markdown("Based on the [Wan 2.2 First/Last Frame workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/), applied to 🧨 Diffusers + [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA")

    with gr.Row(elem_id="general_items"):
        with gr.Column():
            with gr.Group(elem_id="group_all"):
                with gr.Row():
                    start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
                    # Capture the Tabs component in a variable and assign IDs to tabs
                    with gr.Tabs(elem_id="group_tabs") as tabs:
                        with gr.TabItem("Upload", id="upload_tab"):
                            end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
                        with gr.TabItem("Generate", id="generate_tab"):
                            generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
                            gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
                prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images", placeholder="The creature starts to move")

                with gr.Accordion("Advanced Settings", open=False):
                    duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
                    negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                    resolution = gr.Dropdown([
                        ["400,000 px (working)", 400000],
                        ["465,920 px (working)", 465920],
                        ["495,616 px (working)", 495616],
                        ["500,000 px (working)", 500000],
                        ["600,000 px (working)", 600000],
                        ["700,000 px (working)", 700000],
                        ["800,000 px (working)", 800000],
                        ["900,000 px (working)", 900000],
                        ["1,000,000 px (working)", 1000000],
                        ["1,100,000 px (untested)", 1100000],
                        ["1,200,000 px (untested)", 1200000],
                        ["1,300,000 px (untested)", 1300000],
                        ["1,400,000 px (untested)", 1400000],
                        ["1,500,000 px (untested)", 1500000]
                    ], value=465920, label="Resolution (width x height)", info="Lower if the image is smaller")
                    steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
                    guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
                    guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
                    with gr.Row():
                        seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                        randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", info="Even if it's unchecked, the generation is always different", value=True)

            generate_button = gr.Button("🚀 Generate Video", variant="primary")
            dummy_button = gr.Button(elem_id = "dummy_button_id", visible = False, interactive = False)

        with gr.Column():
            output_video = gr.Video(elem_id="output_id", label="Generated Video", autoplay = True, loop = True)
            download_button = gr.DownloadButton(elem_id="download_btn", interactive = True)
            video_information = gr.HTML(value = "")

            with gr.Accordion("🔧 Compilation artifacts (advanced)", open=False):
                gr.Markdown(
                    "Download the compiled AOTInductor artifacts generated at startup (transformer + transformer_2)."
                )
                export_btn = gr.Button("📦 Prepare the compiled transformers archive")
                compiled_download = gr.DownloadButton(label="⬇️ Download compiled_transformers.zip", interactive=False)

            def _build_and_enable_download():
                path = export_compiled_transformers_to_zip()
                return gr.update(value=path, interactive=True)

            export_btn.click(fn=_build_and_enable_download, inputs=None, outputs=compiled_download)

    # Main video generation button
    ui_inputs = [
        start_image,
        end_image,
        prompt,
        negative_prompt_input,
        resolution,
        duration_seconds_input,
        steps_slider,
        guidance_scale_input,
        guidance_scale_2_input,
        seed_input,
        randomize_seed_checkbox
    ]
    ui_outputs = [output_video, download_button, seed_input, video_information, dummy_button]

    generate_button.click(fn = init_view, inputs = [], outputs = [dummy_button], queue = False, show_progress = False).success(
        fn = generate_video,
        inputs = ui_inputs,
        outputs = ui_outputs
    )

    generate_5seconds.click(
        fn=switch_to_upload_tab,
        inputs=None,
        outputs=[tabs]
    ).then(
        fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
        inputs=[start_image],
        outputs=[end_image]
    ).success(
        fn=generate_video,
        inputs=ui_inputs,
        outputs=ui_outputs
    )

    output_video.change(
        fn=output_video_change,
        inputs=[output_video],
        outputs=[download_button, video_information],
        js="document.getElementById('download_btn').click()"
    ).then(
        fn=lambda: None,
        inputs=[],
        outputs=[],
        js="document.getElementById('output_id').scrollIntoView()"
    )

    with gr.Row(elem_id="default_examples"):
        gr.Examples(
            examples=[["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."]],
            inputs=[start_image, end_image, prompt],
            outputs=ui_outputs,
            fn=generate_video,
            run_on_click=True,
            cache_examples=True,
            cache_mode='lazy',
        )
        prompt_debug = gr.Textbox(label="Prompt Debug")
        input_image_debug = gr.Image(type="pil", label="Image Debug")
        end_image_debug = gr.Image(type="pil", label="End Image Debug")
        total_second_length_debug = gr.Slider(label="Duration Debug", minimum=1, maximum=120, value=5, step=0.1)
        resolution_debug = gr.Dropdown([
            ["400,000 px", 400000],
            ["465,920 px", 465920],
            ["495,616 px", 495616],
            ["500,000 px", 500000],
            ["600,000 px", 600000],
            ["700,000 px", 700000],
            ["800,000 px", 800000],
            ["900,000 px", 900000],
            ["1,000,000 px", 1000000],
            ["1,100,000 px", 1100000],
            ["1,200,000 px", 1200000],
            ["1,300,000 px", 1300000],
            ["1,400,000 px", 1400000],
            ["1,500,000 px", 1500000]
        ], value=500000, label="Resolution Debug")
        factor_debug = gr.Slider(label="Factor Debug", minimum=1, maximum=100, value=4.5, step=0.1)
        allocation_time_debug = gr.Slider(label="Allocation Debug", minimum=1, maximum=60 * 60, value=1200, step=1)

        def handle_field_debug_change(
            input_image_debug_data,
            end_image_debug_data,
            prompt_debug_data,
            total_second_length_debug_data,
            resolution_debug_data,
            factor_debug_data,
            allocation_time_debug_data
        ):
            input_image_debug_value[0] = input_image_debug_data if input_image_debug_data is not None else end_image_debug_data
            end_image_debug_value[0] = end_image_debug_data if end_image_debug_data is not None else input_image_debug_data
            prompt_debug_value[0] = prompt_debug_data
            total_second_length_debug_value[0] = total_second_length_debug_data
            resolution_debug_value[0] = resolution_debug_data
            factor_debug_value[0] = factor_debug_data
            allocation_time_debug_value[0] = allocation_time_debug_data
            return []

        inputs_debug = [input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, resolution_debug, factor_debug, allocation_time_debug]

        input_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
        end_image_debug.upload(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
        prompt_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
        total_second_length_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
        resolution_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
        factor_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])
        allocation_time_debug.change(fn=handle_field_debug_change, inputs=inputs_debug, outputs=[])

    gr.Examples(
        label = "Examples from demo",
        examples = [
            ["poli_tower.png", "tower_takes_off.png", "The man turns around."],
            ["ugly_sonic.jpeg", "squatting_sonic.png", "पात्रं क्षेपणास्त्रं चकमाति।"],
            ["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."],
        ],
        inputs = [start_image, end_image, prompt],
        outputs = ui_outputs,
        fn = generate_video,
        cache_examples = False,
    )

if __name__ == "__main__":
    app.launch(mcp_server=True, share=True)
optimization.py CHANGED
@@ -5,22 +5,37 @@
@@ -33,28 +48,109 @@
[Old version: the previous single-transformer compile path survives only as fragments in this view. The updated file is shown in full below.]
from typing import Callable
from typing import ParamSpec

import os
import spaces
import torch
from torch.utils._pytree import tree_map_only
from torchao.quantization import quantize_
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
from torchao.quantization import Int8WeightOnlyConfig
from huggingface_hub import hf_hub_download

from optimization_utils import capture_component_call
from optimization_utils import aoti_compile
from optimization_utils import drain_module_parameters
from optimization_utils import ZeroGPUCompiledModelFromDict  # NEW


P = ParamSpec('P')

# Expose compiled models so app.py can offer them for download
COMPILED_TRANSFORMER_1 = None
COMPILED_TRANSFORMER_2 = None

LATENT_FRAMES_DIM = torch.export.Dim('num_latent_frames', min=8, max=81)
LATENT_PATCHED_HEIGHT_DIM = torch.export.Dim('latent_patched_height', min=30, max=52)
LATENT_PATCHED_WIDTH_DIM = torch.export.Dim('latent_patched_width', min=30, max=52)

TRANSFORMER_DYNAMIC_SHAPES = {
    'hidden_states': {
        2: LATENT_FRAMES_DIM,
        3: 2 * LATENT_PATCHED_HEIGHT_DIM,
        4: 2 * LATENT_PATCHED_WIDTH_DIM,
    },
}

INDUCTOR_CONFIGS = {
    # … (unchanged entries, lines 42-47 not shown in this view)
}


def load_compiled_transformers_from_hub(
    repo_id: str,
    filename_1: str = "compiled_transformer_1.pt",
    filename_2: str = "compiled_transformer_2.pt",
    device: str = "cuda",
):
    """
    Loads the payload dicts (created via ZeroGPUCompiledModel.to_serializable_dict() and torch.save)
    and rebuilds callable models that will move constants to CUDA on first call.
    """
    path_1 = hf_hub_download(repo_id=repo_id, filename=filename_1)
    path_2 = hf_hub_download(repo_id=repo_id, filename=filename_2)

    payload_1 = torch.load(path_1, map_location="cpu", weights_only=False)
    payload_2 = torch.load(path_2, map_location="cpu", weights_only=False)

    if not isinstance(payload_1, dict) or not isinstance(payload_2, dict):
        raise TypeError("Precompiled files are not payload dicts. Please re-export them with to_serializable_dict().")

    compiled_1 = ZeroGPUCompiledModelFromDict(payload_1, device=device)
    compiled_2 = ZeroGPUCompiledModelFromDict(payload_2, device=device)
    return compiled_1, compiled_2


def _strtobool(v: str | None, default: bool = True) -> bool:
    if v is None:
        return default
    return v.strip().lower() in ("1", "true", "yes", "y", "on")

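# Editor's note (not part of the original commit): _strtobool() is defined but
# never called below — use_precompiled is hard-coded to False. A sketch of the
# presumably intended wiring, via a hypothetical WAN_USE_PRECOMPILED variable:
#
#     use_precompiled = _strtobool(os.getenv("WAN_USE_PRECOMPILED"), default=False)
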
def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kwargs):
    global COMPILED_TRANSFORMER_1, COMPILED_TRANSFORMER_2

    @spaces.GPU(duration=1500)
    def compile_transformer():
        pipeline.load_lora_weights(
            "Kijai/WanVideo_comfy",
            weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
            adapter_name="lightx2v",
        )
        kwargs_lora = {"load_into_transformer_2": True}
        pipeline.load_lora_weights(
            "Kijai/WanVideo_comfy",
            weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
            adapter_name="lightx2v_2",
            **kwargs_lora,
        )
        pipeline.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1.0, 1.0])
        pipeline.fuse_lora(adapter_names=["lightx2v"], lora_scale=3.0, components=["transformer"])
        pipeline.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
        pipeline.unload_lora_weights()

        with capture_component_call(pipeline, 'transformer') as call:
            pipeline(*args, **kwargs)

        dynamic_shapes = tree_map_only((torch.Tensor, bool), lambda t: None, call.kwargs)
        dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES

        quantize_(pipeline.transformer, Float8DynamicActivationFloat8WeightConfig())
        quantize_(pipeline.transformer_2, Float8DynamicActivationFloat8WeightConfig())

        exported_1 = torch.export.export(
            mod=pipeline.transformer,
            args=call.args,
            kwargs=call.kwargs,
            dynamic_shapes=dynamic_shapes,
        )
        exported_2 = torch.export.export(
            mod=pipeline.transformer_2,
            args=call.args,
            kwargs=call.kwargs,
            dynamic_shapes=dynamic_shapes,
        )

        compiled_1 = aoti_compile(exported_1, INDUCTOR_CONFIGS)
        compiled_2 = aoti_compile(exported_2, INDUCTOR_CONFIGS)
        return compiled_1, compiled_2

    # Quantize text encoder
    quantize_(pipeline.text_encoder, Int8WeightOnlyConfig())

    use_precompiled = False
    precompiled_repo = os.getenv("WAN_PRECOMPILED_REPO", "Fabrice-TIERCELIN/Wan_2.2_compiled")

    if use_precompiled:
        try:
            compiled_transformer_1, compiled_transformer_2 = load_compiled_transformers_from_hub(
                repo_id=precompiled_repo,
                device="cuda",
            )
        except Exception as e:
            # fallback if payload format is wrong / outdated
            print(f"[WARN] Failed to load precompiled artifacts ({e}). Falling back to GPU compilation.")
            compiled_transformer_1, compiled_transformer_2 = compile_transformer()
    else:
        compiled_transformer_1, compiled_transformer_2 = compile_transformer()

    # expose for downloads
    COMPILED_TRANSFORMER_1 = compiled_transformer_1
    COMPILED_TRANSFORMER_2 = compiled_transformer_2

    pipeline.transformer.forward = compiled_transformer_1
    drain_module_parameters(pipeline.transformer)

    pipeline.transformer_2.forward = compiled_transformer_2
    drain_module_parameters(pipeline.transformer_2)
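A minimal sketch (editor's addition, not part of the commit) of the forward-swap pattern that closes optimize_pipeline_ above: the compiled callable carries its own copy of the weights, so assigning it to the module's forward preserves the nn.Module call interface while drain_module_parameters frees the original parameters.

    import torch

    class Toy(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)

    toy = Toy()
    # Stand-in for the AOTI-compiled callable (hypothetical):
    toy.forward = lambda x: x + 1   # instance attribute shadows the class forward
    print(toy(torch.zeros(4)))      # tensor([1., 1., 1., 1.])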
optimization_utils.py CHANGED
@@ -10,7 +10,6 @@ from unittest.mock import patch
-from torch.export.pt2_archive._package_weights import TensorProperties
@@ -21,31 +20,92 @@
@@ -61,7 +121,8 @@ def aoti_compile(
@@ -94,3 +155,12 @@
[Old version: the previous ZeroGPUCompiledModel, which held a raw Weights object and moved it to CUDA eagerly, survives only as fragments in this view. The updated file is shown in full below.]
import torch
from torch._inductor.package.package import package_aoti
from torch.export.pt2_archive._package import AOTICompiledModel
from torch.export.pt2_archive._package_weights import Weights


# … (unchanged lines 16-19 not shown in this view)
}


class ZeroGPUWeights:
    def __init__(self, constants_map: dict[str, torch.Tensor], to_cuda: bool = False):
        if to_cuda:
            self.constants_map = {name: tensor.to('cuda') for name, tensor in constants_map.items()}
        else:
            self.constants_map = constants_map

    def __reduce__(self):
        constants_map: dict[str, torch.Tensor] = {}
        for name, tensor in self.constants_map.items():
            tensor_ = torch.empty_like(tensor, device='cpu').pin_memory()
            constants_map[name] = tensor_.copy_(tensor).detach().share_memory_()
        return ZeroGPUWeights, (constants_map, True)

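# Editor's note (not part of the original commit): __reduce__ above defines how
# the weights are pickled across the ZeroGPU process boundary — each tensor is
# copied into page-locked (pinned), shared CPU memory, and the reconstructed
# object is built with to_cuda=True so the constants land back on the GPU in
# the worker process.
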
class ZeroGPUCompiledModel:
    def __init__(self, archive_file: torch.types.FileLike, weights: ZeroGPUWeights):
        self.archive_file = archive_file
        self.weights = weights
        self.compiled_model: ContextVar[AOTICompiledModel | None] = ContextVar('compiled_model', default=None)

    def __call__(self, *args, **kwargs):
        if (compiled_model := self.compiled_model.get()) is None:
            compiled_model = cast(AOTICompiledModel, torch._inductor.aoti_load_package(self.archive_file))
            compiled_model.load_constants(self.weights.constants_map, check_full_update=True, user_managed=True)
            self.compiled_model.set(compiled_model)
        return compiled_model(*args, **kwargs)

    def __reduce__(self):
        return ZeroGPUCompiledModel, (self.archive_file, self.weights)

    # --- NEW: stable payload export (matches the zip export format in app.py) ---
    def to_serializable_dict(self) -> dict[str, Any]:
        """
        Stable representation that can be stored with torch.save and re-loaded later,
        without depending on runtime state.
        """
        if hasattr(self.archive_file, "getvalue"):
            archive_bytes = self.archive_file.getvalue()
        else:
            pos = self.archive_file.tell()
            self.archive_file.seek(0)
            archive_bytes = self.archive_file.read()
            self.archive_file.seek(pos)

        constants_cpu = {k: v.detach().to("cpu") for k, v in self.weights.constants_map.items()}
        return {
            "format": "zerogpu_aoti_v1",
            "archive_bytes": archive_bytes,
            "constants_map": constants_cpu,
        }


# --- NEW: loader for the payload dict saved in .pt ---
class ZeroGPUCompiledModelFromDict:
    """
    Rebuilds a callable AOTI model from a dict payload produced by ZeroGPUCompiledModel.to_serializable_dict().

    Important: constants are moved to CUDA on first call to avoid:
      - dtype/device mismatches (CPU bf16 weights vs CUDA bf16 inputs),
      - black outputs due to wrong device handling.
    """
    def __init__(self, payload: dict[str, Any], device: str = "cuda"):
        if payload.get("format") != "zerogpu_aoti_v1":
            raise ValueError(f"Unsupported payload format: {payload.get('format')}")
        self.archive_file = BytesIO(payload["archive_bytes"])
        constants = payload["constants_map"]
        constants = {k: v.to(device=device, dtype=torch.bfloat16).contiguous() for k, v in constants.items()}
        payload["constants_map"] = constants
        self.constants_map_cpu: dict[str, torch.Tensor] = payload["constants_map"]
        self.device = device
        self.compiled_model: ContextVar[AOTICompiledModel | None] = ContextVar("compiled_model", default=None)
        self._loaded_constants = False

    def __call__(self, *args, **kwargs):
        if (compiled_model := self.compiled_model.get()) is None:
            compiled_model = cast(AOTICompiledModel, torch._inductor.aoti_load_package(self.archive_file))
            self.compiled_model.set(compiled_model)

        if not self._loaded_constants:
            # Move constants to the target device and keep dtype as-is (bf16)
            constants_map = {k: v.to(device="cuda", dtype=torch.bfloat16).contiguous() for k, v in self.constants_map_cpu.items()}
            compiled_model.load_constants(constants_map, check_full_update=True, user_managed=True)
            self._loaded_constants = True

        return compiled_model(*args, **kwargs)


def aoti_compile(
    # … (lines 112-120 unchanged, not shown in this view)
    files: list[str | Weights] = [file for file in artifacts if isinstance(file, str)]
    package_aoti(archive_file, files)
    weights, = (artifact for artifact in artifacts if isinstance(artifact, Weights))
    zerogpu_weights = ZeroGPUWeights({name: weights.get_weight(name)[0] for name in weights})
    return ZeroGPUCompiledModel(archive_file, zerogpu_weights)

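# Editor's sketch (not part of the original commit): the intended round-trip
# through the payload format:
#
#     compiled = aoti_compile(exported, INDUCTOR_CONFIGS)   # ZeroGPUCompiledModel
#     torch.save(compiled.to_serializable_dict(), "compiled_transformer_1.pt")
#     ...
#     payload = torch.load("compiled_transformer_1.pt", weights_only=False)
#     restored = ZeroGPUCompiledModelFromDict(payload, device="cuda")
#     output = restored(*call_args, **call_kwargs)  # call_args/kwargs: hypothetical
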
@contextlib.contextmanager
# … (lines 129-154 of capture_component_call unchanged, not shown in this view)
    except CapturedCallException as e:
        captured_call.args = e.args
        captured_call.kwargs = e.kwargs


def drain_module_parameters(module: torch.nn.Module):
    state_dict_meta = {name: {'device': tensor.device, 'dtype': tensor.dtype} for name, tensor in module.state_dict().items()}
    state_dict = {name: torch.nn.Parameter(torch.empty_like(tensor, device='cpu')) for name, tensor in module.state_dict().items()}
    module.load_state_dict(state_dict, assign=True)
    for name, param in state_dict.items():
        meta = state_dict_meta[name]
        param.data = torch.Tensor([]).to(**meta)

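# Editor's note (not part of the original commit): after draining, the module
# keeps its parameter names but every tensor is empty, freeing memory the
# compiled artifact no longer needs, e.g.:
#
#     lin = torch.nn.Linear(1024, 1024)
#     drain_module_parameters(lin)
#     assert lin.weight.numel() == 0
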
requirements.txt CHANGED
@@ -1,5 +1,12 @@
-… (five previous entries, elided in this view)
+git+https://github.com/YassineT-cdc/diffusers.git@wan22-loras_optimized_contigous
+
+transformers==4.57.3
+accelerate==1.12.0
+safetensors==0.7.0
+sentencepiece==0.2.1
+peft==0.18.0
+ftfy==6.3.1
+imageio==2.37.2
+imageio-ffmpeg==0.6.0
+
+torchao==0.14.1
|