Spaces:
Runtime error
Runtime error
Adnan commited on
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,5 @@
|
|
| 1 |
# ============================================
|
| 2 |
# PATCH 1: Fix huggingface_hub HfFolder removal
|
| 3 |
-
# Must be BEFORE gradio import
|
| 4 |
# ============================================
|
| 5 |
try:
|
| 6 |
from huggingface_hub import HfFolder
|
|
@@ -29,11 +28,13 @@ except ImportError:
|
|
| 29 |
# ============================================
|
| 30 |
|
| 31 |
"""
|
| 32 |
-
TimeLapseForge
|
|
|
|
| 33 |
"""
|
| 34 |
|
| 35 |
import os
|
| 36 |
import json
|
|
|
|
| 37 |
import gradio as gr
|
| 38 |
import numpy as np
|
| 39 |
from PIL import Image
|
|
@@ -41,34 +42,21 @@ from typing import List, Optional
|
|
| 41 |
|
| 42 |
# ============================================
|
| 43 |
# PATCH 2: Fix gradio_client schema bug
|
| 44 |
-
# TypeError: argument of type 'bool' is not iterable
|
| 45 |
-
# Happens in gradio_client/utils.py when schema is True/False
|
| 46 |
# ============================================
|
| 47 |
try:
|
| 48 |
import gradio_client.utils as _gc_utils
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
def _patched_json_schema_to_python_type(schema, defs=None):
|
| 53 |
-
if isinstance(schema, bool):
|
| 54 |
-
return "Any"
|
| 55 |
-
if not isinstance(schema, dict):
|
| 56 |
-
return "Any"
|
| 57 |
-
return _orig_json_schema_to_python_type(schema, defs)
|
| 58 |
-
|
| 59 |
-
_gc_utils._json_schema_to_python_type = _patched_json_schema_to_python_type
|
| 60 |
-
|
| 61 |
-
_orig_get_type = _gc_utils.get_type
|
| 62 |
-
|
| 63 |
-
def _patched_get_type(schema):
|
| 64 |
-
if isinstance(schema, bool):
|
| 65 |
return "Any"
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
return "Any"
|
| 68 |
-
return
|
| 69 |
-
|
| 70 |
-
_gc_utils.get_type = _patched_get_type
|
| 71 |
-
|
| 72 |
except Exception:
|
| 73 |
pass
|
| 74 |
# ============================================
|
|
@@ -78,10 +66,14 @@ from frame_interpolator import FrameInterpolator
|
|
| 78 |
from video_assembler import VideoAssembler
|
| 79 |
from api_providers import (
|
| 80 |
PROVIDERS, PROVIDER_DISPLAY_NAMES,
|
| 81 |
-
get_models_for_provider, get_provider_info,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
)
|
| 83 |
|
| 84 |
-
# ---
|
| 85 |
prompt_parser = PromptParser()
|
| 86 |
quick_gen = QuickGenerator()
|
| 87 |
interpolator = FrameInterpolator()
|
|
@@ -94,20 +86,34 @@ LOCAL_MODELS = {
|
|
| 94 |
"SD 1.5 (Light)": "runwayml/stable-diffusion-v1-5",
|
| 95 |
}
|
| 96 |
|
| 97 |
-
|
|
|
|
| 98 |
|
| 99 |
|
| 100 |
-
def
|
| 101 |
-
return PROVIDER_DISPLAY_NAMES.get(
|
| 102 |
|
|
|
|
|
|
|
| 103 |
|
| 104 |
-
def
|
| 105 |
-
key =
|
| 106 |
models = get_models_for_provider(key)
|
| 107 |
if models:
|
| 108 |
return gr.update(choices=models, value=models[0])
|
| 109 |
return gr.update(choices=["custom"], value="custom")
|
| 110 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
|
| 112 |
def parse_input(json_text, quick_text, num_panels, mode):
|
| 113 |
if json_text and json_text.strip():
|
|
@@ -116,39 +122,19 @@ def parse_input(json_text, quick_text, num_panels, mode):
|
|
| 116 |
data = result["data"]
|
| 117 |
prompts = prompt_parser.extract_prompts(data)
|
| 118 |
summary = prompt_parser.get_summary(data)
|
| 119 |
-
table = [
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
]
|
| 124 |
-
return (
|
| 125 |
-
summary, table,
|
| 126 |
-
json.dumps(data, indent=2),
|
| 127 |
-
gr.update(visible=True),
|
| 128 |
-
)
|
| 129 |
-
return (
|
| 130 |
-
"Parse Error: " + str(result["error"]),
|
| 131 |
-
[], "", gr.update(visible=False),
|
| 132 |
-
)
|
| 133 |
elif quick_text and quick_text.strip():
|
| 134 |
data = quick_gen.generate(quick_text, int(num_panels), mode)
|
| 135 |
prompts = prompt_parser.extract_prompts(data)
|
| 136 |
summary = prompt_parser.get_summary(data)
|
| 137 |
-
table = [
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
return (
|
| 143 |
-
summary + "\n\n*Quick text mode -- use GPT JSON for better results.*",
|
| 144 |
-
table,
|
| 145 |
-
json.dumps(data, indent=2),
|
| 146 |
-
gr.update(visible=True),
|
| 147 |
-
)
|
| 148 |
-
return (
|
| 149 |
-
"Please paste JSON or enter quick text.",
|
| 150 |
-
[], "", gr.update(visible=False),
|
| 151 |
-
)
|
| 152 |
|
| 153 |
|
| 154 |
def generate_panels(
|
|
@@ -158,73 +144,62 @@ def generate_panels(
|
|
| 158 |
ref_image, progress=gr.Progress(),
|
| 159 |
):
|
| 160 |
if not parsed_json:
|
| 161 |
-
return [], [], "No JSON
|
| 162 |
-
|
| 163 |
try:
|
| 164 |
data = json.loads(parsed_json)
|
| 165 |
except json.JSONDecodeError:
|
| 166 |
return [], [], "Invalid JSON."
|
| 167 |
-
|
| 168 |
prompts = prompt_parser.extract_prompts(data)
|
| 169 |
if not prompts:
|
| 170 |
-
return [], [], "No panels
|
| 171 |
|
| 172 |
from image_generator import ImageGenerator
|
| 173 |
-
|
| 174 |
if gen_mode == "Local (Free GPU)":
|
| 175 |
-
|
| 176 |
-
gen = ImageGenerator(mode="local", local_model_id=
|
| 177 |
else:
|
| 178 |
-
pkey =
|
| 179 |
if not api_key or not api_key.strip():
|
| 180 |
-
return [], [], "API key required
|
| 181 |
gen = ImageGenerator(
|
| 182 |
mode="api", provider_name=pkey, api_key=api_key.strip(),
|
| 183 |
api_model=api_model, custom_base_url=custom_base_url,
|
| 184 |
-
custom_endpoint_url=custom_endpoint_url
|
| 185 |
-
)
|
| 186 |
|
| 187 |
-
progress(0, desc="Starting
|
|
|
|
|
|
|
| 188 |
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
gen_steps = int(steps) if steps and int(steps) > 0 else None
|
| 193 |
-
gen_guidance = float(guidance) if guidance is not None and float(guidance) >= 0 else None
|
| 194 |
w = int(width) if width and int(width) > 0 else None
|
| 195 |
h = int(height) if height and int(height) > 0 else None
|
| 196 |
|
| 197 |
images = gen.generate_all_panels(
|
| 198 |
prompts=prompts, strength=float(strength), base_seed=int(base_seed),
|
| 199 |
-
steps=
|
| 200 |
-
reference_image=ref_image, progress_callback=cb
|
| 201 |
-
)
|
| 202 |
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
gallery_items.append((img, label))
|
| 207 |
-
|
| 208 |
-
return gallery_items, images, "Generated " + str(len(images)) + " panels successfully!"
|
| 209 |
|
| 210 |
|
| 211 |
def interpolate_and_assemble(
|
| 212 |
-
images_state, parsed_json,
|
| 213 |
-
fps,
|
| 214 |
music_file, export_gif, progress=gr.Progress(),
|
| 215 |
):
|
| 216 |
if not images_state:
|
| 217 |
-
return None, None, None, "No images
|
| 218 |
-
|
| 219 |
images = images_state
|
| 220 |
-
|
| 221 |
|
| 222 |
progress(0.1, desc="Interpolating...")
|
| 223 |
-
if
|
| 224 |
def icb(c, t):
|
| 225 |
-
progress(0.1 + (c
|
| 226 |
-
smooth = interpolator.interpolate_sequence(
|
| 227 |
-
images, interp_multiplier, interp_method, icb)
|
| 228 |
else:
|
| 229 |
smooth = list(images)
|
| 230 |
|
|
@@ -232,85 +207,323 @@ def interpolate_and_assemble(
|
|
| 232 |
if add_labels and parsed_json:
|
| 233 |
try:
|
| 234 |
data = json.loads(parsed_json)
|
| 235 |
-
|
| 236 |
-
if
|
| 237 |
labels = []
|
| 238 |
-
for p in
|
| 239 |
labels.append(p.get("timestamp_label", ""))
|
| 240 |
-
labels.extend([""] *
|
| 241 |
labels = labels[:len(smooth)]
|
| 242 |
else:
|
| 243 |
-
labels = [p.get("timestamp_label", "") for p in
|
| 244 |
except Exception:
|
| 245 |
pass
|
| 246 |
|
| 247 |
-
progress(0.5, desc="
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
video_path = assembler.create_video(
|
| 253 |
-
smooth, fps=int(fps), hold_seconds=adj_hold,
|
| 254 |
add_labels=add_labels, labels=labels,
|
| 255 |
-
add_progress=
|
| 256 |
-
)
|
| 257 |
|
| 258 |
if music_file:
|
| 259 |
-
progress(0.8, desc="
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
progress(0.9, desc="Creating comparison...")
|
| 263 |
-
comparison = assembler.create_comparison_image(images[0], images[-1])
|
| 264 |
|
| 265 |
-
|
|
|
|
|
|
|
| 266 |
if export_gif:
|
| 267 |
-
|
| 268 |
-
gif_path = assembler.create_gif(images)
|
| 269 |
|
| 270 |
-
|
| 271 |
-
return (
|
| 272 |
-
video_path, comparison, gif_path,
|
| 273 |
-
"Video created! " + str(len(smooth)) + " frames at " + str(int(fps)) + "fps",
|
| 274 |
-
)
|
| 275 |
|
| 276 |
|
| 277 |
def regenerate_single(
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
api_model, custom_base_url, custom_endpoint_url,
|
| 281 |
-
strength, base_seed,
|
| 282 |
):
|
| 283 |
-
if not
|
| 284 |
-
return
|
| 285 |
-
|
| 286 |
try:
|
| 287 |
-
data = json.loads(
|
| 288 |
prompts = prompt_parser.extract_prompts(data)
|
| 289 |
except Exception:
|
| 290 |
-
return
|
| 291 |
-
|
| 292 |
-
idx =
|
| 293 |
-
|
| 294 |
-
return images_state, [], "Panel must be 1-" + str(len(images_state))
|
| 295 |
|
| 296 |
from image_generator import ImageGenerator
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
gen = ImageGenerator(mode="local", local_model_id=model_id)
|
| 301 |
else:
|
| 302 |
-
pkey = display_to_key(provider_name)
|
| 303 |
gen = ImageGenerator(
|
| 304 |
-
mode="api", provider_name=
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 308 |
|
| 309 |
-
_, updated = gen.regenerate_single_panel(
|
| 310 |
-
idx, prompts, images_state, float(strength), int(base_seed))
|
| 311 |
|
| 312 |
-
|
| 313 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 314 |
|
| 315 |
|
| 316 |
# ===============================================
|
|
@@ -318,39 +531,39 @@ def regenerate_single(
|
|
| 318 |
# ===============================================
|
| 319 |
|
| 320 |
HEADER = (
|
| 321 |
-
"# TimeLapseForge
|
| 322 |
-
"###
|
| 323 |
-
"
|
| 324 |
-
"**
|
| 325 |
-
"**Ideogram** | **Leonardo** | **Custom API** | **Local GPU**"
|
| 326 |
)
|
| 327 |
|
| 328 |
API_HELP = (
|
| 329 |
-
"###
|
| 330 |
-
"| Provider |
|
| 331 |
-
"|
|
| 332 |
-
"|
|
| 333 |
-
"|
|
| 334 |
-
"|
|
| 335 |
-
"|
|
| 336 |
-
"|
|
| 337 |
-
"|
|
| 338 |
-
"|
|
| 339 |
-
"|
|
| 340 |
-
"|
|
| 341 |
-
"
|
| 342 |
-
"|
|
| 343 |
-
"|
|
| 344 |
-
"\n"
|
| 345 |
-
"
|
| 346 |
-
"
|
| 347 |
-
"
|
| 348 |
-
"
|
|
|
|
| 349 |
)
|
| 350 |
|
| 351 |
|
| 352 |
with gr.Blocks(
|
| 353 |
-
title="TimeLapseForge
|
| 354 |
theme=gr.themes.Soft(primary_hue="emerald", secondary_hue="blue"),
|
| 355 |
) as app:
|
| 356 |
|
|
@@ -361,250 +574,300 @@ with gr.Blocks(
|
|
| 361 |
|
| 362 |
with gr.Tabs():
|
| 363 |
|
| 364 |
-
# ===
|
| 365 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 366 |
with gr.Row():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 367 |
with gr.Column(scale=2):
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
with gr.Column(scale=1):
|
| 374 |
-
gr.
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
)
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
)
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
)
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
label="Extracted Panels", visible=False, wrap=True,
|
| 393 |
-
)
|
| 394 |
-
|
| 395 |
-
# === TAB 2: GENERATE ===
|
| 396 |
-
with gr.Tab("Generate Panels"):
|
| 397 |
with gr.Row():
|
| 398 |
with gr.Column(scale=1):
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
choices=PROVIDER_CHOICES,
|
| 417 |
-
value=PROVIDER_CHOICES[0] if PROVIDER_CHOICES else "OpenAI",
|
| 418 |
-
label="Provider",
|
| 419 |
-
)
|
| 420 |
-
api_key_input = gr.Textbox(
|
| 421 |
-
label="API Key",
|
| 422 |
-
placeholder="Paste your API key here...",
|
| 423 |
-
type="password",
|
| 424 |
-
)
|
| 425 |
-
api_model_dropdown = gr.Dropdown(
|
| 426 |
-
label="Model",
|
| 427 |
-
choices=["dall-e-3", "dall-e-2", "gpt-image-1"],
|
| 428 |
-
value="dall-e-3",
|
| 429 |
-
allow_custom_value=True,
|
| 430 |
-
)
|
| 431 |
-
with gr.Accordion("Custom API Settings", open=False):
|
| 432 |
-
custom_base_url = gr.Textbox(
|
| 433 |
-
label="Custom Base URL",
|
| 434 |
-
placeholder="https://your-api.com/v1",
|
| 435 |
-
)
|
| 436 |
-
custom_endpoint_url = gr.Textbox(
|
| 437 |
-
label="Direct Endpoint URL",
|
| 438 |
-
placeholder="https://your-api.com/generate",
|
| 439 |
-
)
|
| 440 |
-
|
| 441 |
-
gr.Markdown("### Generation Settings")
|
| 442 |
-
strength = gr.Slider(
|
| 443 |
-
minimum=0.15, maximum=0.70, value=0.38, step=0.01,
|
| 444 |
-
label="Change Strength (lower=more consistent)",
|
| 445 |
-
)
|
| 446 |
-
base_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 447 |
-
with gr.Row():
|
| 448 |
-
steps = gr.Slider(
|
| 449 |
-
minimum=0, maximum=50, value=0, step=1,
|
| 450 |
-
label="Steps (0=auto)",
|
| 451 |
-
)
|
| 452 |
-
guidance = gr.Slider(
|
| 453 |
-
minimum=-1, maximum=15, value=-1, step=0.5,
|
| 454 |
-
label="CFG (-1=auto)",
|
| 455 |
-
)
|
| 456 |
-
with gr.Row():
|
| 457 |
-
img_width = gr.Number(
|
| 458 |
-
value=1024, label="Width", precision=0
|
| 459 |
-
)
|
| 460 |
-
img_height = gr.Number(
|
| 461 |
-
value=1024, label="Height", precision=0
|
| 462 |
-
)
|
| 463 |
-
|
| 464 |
-
ref_image = gr.Image(
|
| 465 |
-
label="Reference Image (optional)", type="pil"
|
| 466 |
-
)
|
| 467 |
-
|
| 468 |
-
with gr.Column(scale=3):
|
| 469 |
-
generate_btn = gr.Button(
|
| 470 |
-
"Generate All Panels", variant="primary", size="lg"
|
| 471 |
-
)
|
| 472 |
-
gen_status = gr.Markdown("")
|
| 473 |
-
gallery = gr.Gallery(
|
| 474 |
-
label="Generated Panels",
|
| 475 |
-
columns=5, rows=3, height=450,
|
| 476 |
-
object_fit="contain",
|
| 477 |
-
)
|
| 478 |
-
|
| 479 |
-
with gr.Accordion("Regenerate Single Panel", open=False):
|
| 480 |
-
with gr.Row():
|
| 481 |
-
regen_num = gr.Number(
|
| 482 |
-
value=1, label="Panel Number", precision=0
|
| 483 |
-
)
|
| 484 |
-
regen_btn = gr.Button(
|
| 485 |
-
"Regenerate", variant="secondary"
|
| 486 |
-
)
|
| 487 |
-
regen_status = gr.Markdown("")
|
| 488 |
-
|
| 489 |
-
# === TAB 3: VIDEO ===
|
| 490 |
-
with gr.Tab("Create Video"):
|
| 491 |
with gr.Row():
|
| 492 |
with gr.Column(scale=1):
|
| 493 |
-
gr.
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
)
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
)
|
| 502 |
-
|
| 503 |
-
gr.
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
gr.
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
)
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
)
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
)
|
| 523 |
-
|
| 524 |
-
gr.
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 533 |
with gr.Column(scale=2):
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
)
|
| 537 |
-
|
| 538 |
-
|
| 539 |
-
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
# === TAB 4: API HELP ===
|
| 543 |
with gr.Tab("API Keys Guide"):
|
| 544 |
gr.Markdown(API_HELP)
|
| 545 |
|
| 546 |
-
# ===
|
|
|
|
|
|
|
| 547 |
|
| 548 |
-
def
|
| 549 |
if mode == "Local (Free GPU)":
|
| 550 |
return gr.update(visible=True), gr.update(visible=False)
|
| 551 |
return gr.update(visible=False), gr.update(visible=True)
|
| 552 |
|
| 553 |
-
|
| 554 |
-
|
| 555 |
-
|
| 556 |
-
|
| 557 |
-
)
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
|
| 561 |
-
|
| 562 |
-
|
| 563 |
-
|
| 564 |
-
|
| 565 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 566 |
|
| 567 |
-
parse_btn.click(
|
| 568 |
-
fn=parse_input,
|
| 569 |
-
inputs=[json_input, quick_text, quick_panels, quick_mode],
|
| 570 |
-
outputs=[parse_status, prompt_table, parsed_json_state, prompt_table],
|
| 571 |
-
)
|
| 572 |
|
| 573 |
-
generate_btn.click(
|
| 574 |
-
fn=generate_panels,
|
| 575 |
-
inputs=[
|
| 576 |
-
parsed_json_state, gen_mode, local_model, provider_dropdown,
|
| 577 |
-
api_key_input, api_model_dropdown, custom_base_url,
|
| 578 |
-
custom_endpoint_url, strength, base_seed, steps, guidance,
|
| 579 |
-
img_width, img_height, ref_image,
|
| 580 |
-
],
|
| 581 |
-
outputs=[gallery, images_state, gen_status],
|
| 582 |
-
)
|
| 583 |
-
|
| 584 |
-
regen_btn.click(
|
| 585 |
-
fn=regenerate_single,
|
| 586 |
-
inputs=[
|
| 587 |
-
regen_num, parsed_json_state, images_state,
|
| 588 |
-
gen_mode, local_model, provider_dropdown, api_key_input,
|
| 589 |
-
api_model_dropdown, custom_base_url, custom_endpoint_url,
|
| 590 |
-
strength, base_seed,
|
| 591 |
-
],
|
| 592 |
-
outputs=[images_state, gallery, regen_status],
|
| 593 |
-
)
|
| 594 |
-
|
| 595 |
-
assemble_btn.click(
|
| 596 |
-
fn=interpolate_and_assemble,
|
| 597 |
-
inputs=[
|
| 598 |
-
images_state, parsed_json_state, interp_mult, interp_method,
|
| 599 |
-
vid_fps, frame_hold, add_labels, add_progress_bar, add_bookend,
|
| 600 |
-
music_file, export_gif,
|
| 601 |
-
],
|
| 602 |
-
outputs=[video_output, comparison_output, gif_output, vid_status],
|
| 603 |
-
)
|
| 604 |
-
|
| 605 |
-
|
| 606 |
-
# ===============================================
|
| 607 |
-
# LAUNCH
|
| 608 |
# ===============================================
|
| 609 |
if __name__ == "__main__":
|
| 610 |
app.launch(
|
|
|
|
| 1 |
# ============================================
|
| 2 |
# PATCH 1: Fix huggingface_hub HfFolder removal
|
|
|
|
| 3 |
# ============================================
|
| 4 |
try:
|
| 5 |
from huggingface_hub import HfFolder
|
|
|
|
| 28 |
# ============================================
|
| 29 |
|
| 30 |
"""
|
| 31 |
+
TimeLapseForge v3.0 - Full Creative Studio
|
| 32 |
+
Timelapse + T2I + I2I + T2V + I2V + Frames2Video + Ingredients2Video
|
| 33 |
"""
|
| 34 |
|
| 35 |
import os
|
| 36 |
import json
|
| 37 |
+
import tempfile
|
| 38 |
import gradio as gr
|
| 39 |
import numpy as np
|
| 40 |
from PIL import Image
|
|
|
|
| 42 |
|
| 43 |
# ============================================
|
| 44 |
# PATCH 2: Fix gradio_client schema bug
|
|
|
|
|
|
|
| 45 |
# ============================================
|
| 46 |
try:
|
| 47 |
import gradio_client.utils as _gc_utils
|
| 48 |
+
_orig_jst = _gc_utils._json_schema_to_python_type
|
| 49 |
+
def _patched_jst(schema, defs=None):
|
| 50 |
+
if isinstance(schema, bool) or not isinstance(schema, dict):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
return "Any"
|
| 52 |
+
return _orig_jst(schema, defs)
|
| 53 |
+
_gc_utils._json_schema_to_python_type = _patched_jst
|
| 54 |
+
_orig_gt = _gc_utils.get_type
|
| 55 |
+
def _patched_gt(schema):
|
| 56 |
+
if isinstance(schema, bool) or not isinstance(schema, dict):
|
| 57 |
return "Any"
|
| 58 |
+
return _orig_gt(schema)
|
| 59 |
+
_gc_utils.get_type = _patched_gt
|
|
|
|
|
|
|
| 60 |
except Exception:
|
| 61 |
pass
|
| 62 |
# ============================================
|
|
|
|
| 66 |
from video_assembler import VideoAssembler
|
| 67 |
from api_providers import (
|
| 68 |
PROVIDERS, PROVIDER_DISPLAY_NAMES,
|
| 69 |
+
get_models_for_provider, get_provider_info, get_provider,
|
| 70 |
+
)
|
| 71 |
+
from video_providers import (
|
| 72 |
+
VIDEO_PROVIDERS, VIDEO_PROVIDER_DISPLAY_NAMES,
|
| 73 |
+
get_video_provider, get_video_provider_info, get_video_models_for_provider,
|
| 74 |
)
|
| 75 |
|
| 76 |
+
# --- Init modules ---
|
| 77 |
prompt_parser = PromptParser()
|
| 78 |
quick_gen = QuickGenerator()
|
| 79 |
interpolator = FrameInterpolator()
|
|
|
|
| 86 |
"SD 1.5 (Light)": "runwayml/stable-diffusion-v1-5",
|
| 87 |
}
|
| 88 |
|
| 89 |
+
IMG_PROVIDER_CHOICES = [p["display_name"] for p in get_provider_info()]
|
| 90 |
+
VID_PROVIDER_CHOICES = [p["display_name"] for p in get_video_provider_info()]
|
| 91 |
|
| 92 |
|
| 93 |
+
def img_display_to_key(name):
|
| 94 |
+
return PROVIDER_DISPLAY_NAMES.get(name, "openai")
|
| 95 |
|
| 96 |
+
def vid_display_to_key(name):
|
| 97 |
+
return VIDEO_PROVIDER_DISPLAY_NAMES.get(name, "fal_video")
|
| 98 |
|
| 99 |
+
def update_img_models(name):
|
| 100 |
+
key = img_display_to_key(name)
|
| 101 |
models = get_models_for_provider(key)
|
| 102 |
if models:
|
| 103 |
return gr.update(choices=models, value=models[0])
|
| 104 |
return gr.update(choices=["custom"], value="custom")
|
| 105 |
|
| 106 |
+
def update_vid_models(name):
|
| 107 |
+
key = vid_display_to_key(name)
|
| 108 |
+
models = get_video_models_for_provider(key)
|
| 109 |
+
if models:
|
| 110 |
+
return gr.update(choices=models, value=models[0])
|
| 111 |
+
return gr.update(choices=["custom"], value="custom")
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# ============================================
|
| 115 |
+
# TIMELAPSE FUNCTIONS (existing)
|
| 116 |
+
# ============================================
|
| 117 |
|
| 118 |
def parse_input(json_text, quick_text, num_panels, mode):
|
| 119 |
if json_text and json_text.strip():
|
|
|
|
| 122 |
data = result["data"]
|
| 123 |
prompts = prompt_parser.extract_prompts(data)
|
| 124 |
summary = prompt_parser.get_summary(data)
|
| 125 |
+
table = [[p["panel_id"], p["phase"], p["panel_title"],
|
| 126 |
+
p["main_prompt"][:100] + "..."] for p in prompts]
|
| 127 |
+
return summary, table, json.dumps(data, indent=2), gr.update(visible=True)
|
| 128 |
+
return "Parse Error: " + str(result["error"]), [], "", gr.update(visible=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
elif quick_text and quick_text.strip():
|
| 130 |
data = quick_gen.generate(quick_text, int(num_panels), mode)
|
| 131 |
prompts = prompt_parser.extract_prompts(data)
|
| 132 |
summary = prompt_parser.get_summary(data)
|
| 133 |
+
table = [[p["panel_id"], p["phase"], p["panel_title"],
|
| 134 |
+
p["main_prompt"][:100] + "..."] for p in prompts]
|
| 135 |
+
return (summary + "\n\n*Quick text -- use GPT JSON for better results*",
|
| 136 |
+
table, json.dumps(data, indent=2), gr.update(visible=True))
|
| 137 |
+
return "Please paste JSON or enter quick text.", [], "", gr.update(visible=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
|
| 139 |
|
| 140 |
def generate_panels(
|
|
|
|
| 144 |
ref_image, progress=gr.Progress(),
|
| 145 |
):
|
| 146 |
if not parsed_json:
|
| 147 |
+
return [], [], "No JSON. Parse input first."
|
|
|
|
| 148 |
try:
|
| 149 |
data = json.loads(parsed_json)
|
| 150 |
except json.JSONDecodeError:
|
| 151 |
return [], [], "Invalid JSON."
|
|
|
|
| 152 |
prompts = prompt_parser.extract_prompts(data)
|
| 153 |
if not prompts:
|
| 154 |
+
return [], [], "No panels in JSON."
|
| 155 |
|
| 156 |
from image_generator import ImageGenerator
|
|
|
|
| 157 |
if gen_mode == "Local (Free GPU)":
|
| 158 |
+
mid = LOCAL_MODELS.get(local_model, "stabilityai/sdxl-turbo")
|
| 159 |
+
gen = ImageGenerator(mode="local", local_model_id=mid)
|
| 160 |
else:
|
| 161 |
+
pkey = img_display_to_key(provider_name)
|
| 162 |
if not api_key or not api_key.strip():
|
| 163 |
+
return [], [], "API key required."
|
| 164 |
gen = ImageGenerator(
|
| 165 |
mode="api", provider_name=pkey, api_key=api_key.strip(),
|
| 166 |
api_model=api_model, custom_base_url=custom_base_url,
|
| 167 |
+
custom_endpoint_url=custom_endpoint_url)
|
|
|
|
| 168 |
|
| 169 |
+
progress(0, desc="Starting...")
|
| 170 |
+
def cb(c, t):
|
| 171 |
+
progress(c / t, desc="Panel " + str(c) + "/" + str(t))
|
| 172 |
|
| 173 |
+
gs = int(steps) if steps and int(steps) > 0 else None
|
| 174 |
+
gg = float(guidance) if guidance is not None and float(guidance) >= 0 else None
|
|
|
|
|
|
|
|
|
|
| 175 |
w = int(width) if width and int(width) > 0 else None
|
| 176 |
h = int(height) if height and int(height) > 0 else None
|
| 177 |
|
| 178 |
images = gen.generate_all_panels(
|
| 179 |
prompts=prompts, strength=float(strength), base_seed=int(base_seed),
|
| 180 |
+
steps=gs, guidance=gg, width=w, height=h,
|
| 181 |
+
reference_image=ref_image, progress_callback=cb)
|
|
|
|
| 182 |
|
| 183 |
+
gallery = [(img, prompts[i]["panel_title"] if i < len(prompts) else "Panel " + str(i+1))
|
| 184 |
+
for i, img in enumerate(images)]
|
| 185 |
+
return gallery, images, "Generated " + str(len(images)) + " panels!"
|
|
|
|
|
|
|
|
|
|
| 186 |
|
| 187 |
|
| 188 |
def interpolate_and_assemble(
|
| 189 |
+
images_state, parsed_json, interp_mult, interp_method,
|
| 190 |
+
fps, hold_sec, add_labels, add_progress, add_bookend,
|
| 191 |
music_file, export_gif, progress=gr.Progress(),
|
| 192 |
):
|
| 193 |
if not images_state:
|
| 194 |
+
return None, None, None, "No images. Generate first."
|
|
|
|
| 195 |
images = images_state
|
| 196 |
+
mult = int(interp_mult)
|
| 197 |
|
| 198 |
progress(0.1, desc="Interpolating...")
|
| 199 |
+
if mult > 1:
|
| 200 |
def icb(c, t):
|
| 201 |
+
progress(0.1 + (c/t)*0.3)
|
| 202 |
+
smooth = interpolator.interpolate_sequence(images, mult, interp_method, icb)
|
|
|
|
| 203 |
else:
|
| 204 |
smooth = list(images)
|
| 205 |
|
|
|
|
| 207 |
if add_labels and parsed_json:
|
| 208 |
try:
|
| 209 |
data = json.loads(parsed_json)
|
| 210 |
+
prm = prompt_parser.extract_prompts(data)
|
| 211 |
+
if mult > 1:
|
| 212 |
labels = []
|
| 213 |
+
for p in prm:
|
| 214 |
labels.append(p.get("timestamp_label", ""))
|
| 215 |
+
labels.extend([""] * mult)
|
| 216 |
labels = labels[:len(smooth)]
|
| 217 |
else:
|
| 218 |
+
labels = [p.get("timestamp_label", "") for p in prm]
|
| 219 |
except Exception:
|
| 220 |
pass
|
| 221 |
|
| 222 |
+
progress(0.5, desc="Video...")
|
| 223 |
+
adj = float(hold_sec) / max(mult, 1) if mult > 1 else float(hold_sec)
|
| 224 |
+
vpath = assembler.create_video(
|
| 225 |
+
smooth, fps=int(fps), hold_seconds=adj,
|
|
|
|
|
|
|
|
|
|
| 226 |
add_labels=add_labels, labels=labels,
|
| 227 |
+
add_progress=add_progress, add_bookend_labels=add_bookend)
|
|
|
|
| 228 |
|
| 229 |
if music_file:
|
| 230 |
+
progress(0.8, desc="Audio...")
|
| 231 |
+
vpath = assembler.add_audio_to_video(vpath, music_file)
|
|
|
|
|
|
|
|
|
|
| 232 |
|
| 233 |
+
progress(0.9)
|
| 234 |
+
comp = assembler.create_comparison_image(images[0], images[-1])
|
| 235 |
+
gpath = None
|
| 236 |
if export_gif:
|
| 237 |
+
gpath = assembler.create_gif(images)
|
|
|
|
| 238 |
|
| 239 |
+
return vpath, comp, gpath, "Done! " + str(len(smooth)) + " frames"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
|
| 241 |
|
| 242 |
def regenerate_single(
|
| 243 |
+
pnum, pjson, imgs, gmode, lmodel, prov, akey,
|
| 244 |
+
amodel, curl, eurl, stren, seed,
|
|
|
|
|
|
|
| 245 |
):
|
| 246 |
+
if not imgs or not pjson:
|
| 247 |
+
return imgs, [], "No data."
|
|
|
|
| 248 |
try:
|
| 249 |
+
data = json.loads(pjson)
|
| 250 |
prompts = prompt_parser.extract_prompts(data)
|
| 251 |
except Exception:
|
| 252 |
+
return imgs, [], "Invalid JSON."
|
| 253 |
+
idx = int(pnum) - 1
|
| 254 |
+
if idx < 0 or idx >= len(imgs):
|
| 255 |
+
return imgs, [], "Invalid panel number."
|
|
|
|
| 256 |
|
| 257 |
from image_generator import ImageGenerator
|
| 258 |
+
if gmode == "Local (Free GPU)":
|
| 259 |
+
mid = LOCAL_MODELS.get(lmodel, "stabilityai/sdxl-turbo")
|
| 260 |
+
gen = ImageGenerator(mode="local", local_model_id=mid)
|
|
|
|
| 261 |
else:
|
|
|
|
| 262 |
gen = ImageGenerator(
|
| 263 |
+
mode="api", provider_name=img_display_to_key(prov),
|
| 264 |
+
api_key=akey.strip(), api_model=amodel,
|
| 265 |
+
custom_base_url=curl, custom_endpoint_url=eurl)
|
| 266 |
+
|
| 267 |
+
_, updated = gen.regenerate_single_panel(idx, prompts, imgs, float(stren), int(seed))
|
| 268 |
+
gal = [(img, "Panel " + str(i+1)) for i, img in enumerate(updated)]
|
| 269 |
+
return updated, gal, "Panel " + str(int(pnum)) + " regenerated!"
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# ============================================
|
| 273 |
+
# TEXT TO IMAGE
|
| 274 |
+
# ============================================
|
| 275 |
+
|
| 276 |
+
def do_text_to_image(prompt, neg, provider_name, api_key, model, w, h, seed):
|
| 277 |
+
if not prompt:
|
| 278 |
+
return None, "Enter a prompt."
|
| 279 |
+
if not api_key or not api_key.strip():
|
| 280 |
+
return None, "API key required."
|
| 281 |
+
pkey = img_display_to_key(provider_name)
|
| 282 |
+
prov = get_provider(pkey, api_key.strip())
|
| 283 |
+
try:
|
| 284 |
+
img = prov.generate_image(
|
| 285 |
+
prompt=prompt, negative_prompt=neg,
|
| 286 |
+
width=int(w), height=int(h),
|
| 287 |
+
seed=int(seed) if seed else None, model=model)
|
| 288 |
+
return img, "Image generated!"
|
| 289 |
+
except Exception as e:
|
| 290 |
+
return None, "Error: " + str(e)
|
| 291 |
|
|
|
|
|
|
|
| 292 |
|
| 293 |
+
# ============================================
|
| 294 |
+
# IMAGE TO IMAGE
|
| 295 |
+
# ============================================
|
| 296 |
+
|
| 297 |
+
def do_image_to_image(source_img, prompt, neg, provider_name, api_key, model, strength, seed):
|
| 298 |
+
if source_img is None:
|
| 299 |
+
return None, "Upload a source image."
|
| 300 |
+
if not prompt:
|
| 301 |
+
return None, "Enter a prompt."
|
| 302 |
+
if not api_key or not api_key.strip():
|
| 303 |
+
return None, "API key required."
|
| 304 |
+
pkey = img_display_to_key(provider_name)
|
| 305 |
+
prov = get_provider(pkey, api_key.strip())
|
| 306 |
+
try:
|
| 307 |
+
if prov.supports_img2img:
|
| 308 |
+
img = prov.img2img(
|
| 309 |
+
prompt=prompt, image=source_img,
|
| 310 |
+
strength=float(strength), negative_prompt=neg,
|
| 311 |
+
seed=int(seed) if seed else None, model=model)
|
| 312 |
+
else:
|
| 313 |
+
img = prov.generate_image(
|
| 314 |
+
prompt=prompt, negative_prompt=neg,
|
| 315 |
+
width=source_img.width, height=source_img.height,
|
| 316 |
+
seed=int(seed) if seed else None, model=model)
|
| 317 |
+
return img, "Image transformed!"
|
| 318 |
+
except Exception as e:
|
| 319 |
+
return None, "Error: " + str(e)
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
# ============================================
|
| 323 |
+
# TEXT TO VIDEO
|
| 324 |
+
# ============================================
|
| 325 |
+
|
| 326 |
+
def do_text_to_video(prompt, provider_name, api_key, model, duration, seed, progress=gr.Progress()):
    """Generate a short video clip from a text prompt via the selected
    video provider. Returns (video_path_or_None, status_msg)."""
    # Input validation first, before any provider construction.
    if not prompt:
        return None, "Enter a prompt."
    if not api_key or not api_key.strip():
        return None, "API key required."

    provider = get_video_provider(vid_display_to_key(provider_name), api_key.strip())
    if not provider.supports_t2v:
        return None, "This provider does not support text-to-video."

    try:
        progress(0.1, desc="Generating video...")
        video_path = provider.text_to_video(
            prompt=prompt,
            duration=int(duration),
            seed=int(seed) if seed else None,
            model=model,
        )
        progress(1.0, desc="Done!")
        return video_path, "Video generated!"
    except Exception as e:
        return None, "Error: " + str(e)
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
# ============================================
|
| 347 |
+
# IMAGE TO VIDEO
|
| 348 |
+
# ============================================
|
| 349 |
+
|
| 350 |
+
def do_image_to_video(source_img, prompt, provider_name, api_key, model, duration, seed, progress=gr.Progress()):
    """Animate a still image into a short video with the selected provider.

    The motion prompt is optional (an empty string is sent when omitted).
    Returns (video_path_or_None, status_msg).
    """
    if source_img is None:
        return None, "Upload an image."
    if not api_key or not api_key.strip():
        return None, "API key required."

    provider = get_video_provider(vid_display_to_key(provider_name), api_key.strip())
    if not provider.supports_i2v:
        return None, "This provider does not support image-to-video."

    try:
        progress(0.1, desc="Generating video...")
        video_path = provider.image_to_video(
            image=source_img,
            prompt=prompt or "",
            duration=int(duration),
            seed=int(seed) if seed else None,
            model=model,
        )
        progress(1.0, desc="Done!")
        return video_path, "Video generated!"
    except Exception as e:
        return None, "Error: " + str(e)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# ============================================
|
| 372 |
+
# FRAMES TO VIDEO
|
| 373 |
+
# ============================================
|
| 374 |
+
|
| 375 |
+
def do_frames_to_video(frame_files, fps, hold_sec, interp_mult, interp_method,
                       add_progress, add_bookend, music_file, progress=gr.Progress()):
    """Assemble user-uploaded still frames into a video.

    Frames are normalized to the first frame's resolution, optionally
    interpolated for smoothness, and optionally muxed with a music track.
    Returns (video_path_or_None, status_msg).
    """
    if not frame_files:
        return None, "Upload frames."

    # Load every frame; gradio may hand us file objects or bare paths.
    loaded = []
    for entry in frame_files:
        try:
            source = entry.name if hasattr(entry, 'name') else entry
            loaded.append(Image.open(source).convert("RGB"))
        except Exception as e:
            return None, "Error loading frame: " + str(e)

    if len(loaded) < 2:
        return None, "Need at least 2 frames."

    # Conform all frames to the first frame's size so assembly can't fail
    # on mismatched resolutions.
    first_size = loaded[0].size
    frames = [im.resize(first_size, Image.LANCZOS) for im in loaded]

    factor = int(interp_mult)
    progress(0.2, desc="Interpolating...")
    sequence = (interpolator.interpolate_sequence(frames, factor, interp_method)
                if factor > 1 else frames)

    progress(0.5, desc="Assembling...")
    # Shrink the per-frame hold when interpolating so total runtime is stable.
    hold = float(hold_sec) / factor if factor > 1 else float(hold_sec)
    out_path = assembler.create_video(
        sequence, fps=int(fps), hold_seconds=hold,
        add_labels=False, add_progress=add_progress,
        add_bookend_labels=add_bookend)

    if music_file:
        progress(0.8, desc="Audio...")
        track = music_file.name if hasattr(music_file, 'name') else music_file
        out_path = assembler.add_audio_to_video(out_path, track)

    progress(1.0)
    return out_path, "Video created from " + str(len(frames)) + " frames!"
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
# ============================================
|
| 424 |
+
# INGREDIENTS TO VIDEO
|
| 425 |
+
# ============================================
|
| 426 |
+
|
| 427 |
+
def do_ingredients_to_video(
    ingredient_files, story_prompt, provider_name, api_key, model,
    num_transition_frames, strength, seed,
    fps, hold_sec, interp_mult, music_file,
    progress=gr.Progress(),
):
    """Build a video from "ingredient" key images by synthesizing transition
    frames between each consecutive pair.

    For providers with img2img support, each transition frame starts from a
    linear cross-fade of the pair and is re-rendered by the model; if the API
    call fails, the plain cross-fade is kept as a best-effort fallback.
    Providers without img2img get pure cross-fades. The resulting sequence is
    optionally interpolated for smoothness, assembled into a video (with
    optional background music), and paired with a first-vs-last comparison
    image.

    Returns (video_path, comparison_image, status_msg); the first two are
    None on validation or load errors.

    Fix vs. previous revision: the per-pair float array conversions were
    recomputed inside the inner per-frame loop (loop-invariant work), and the
    cross-fade was duplicated in both branches; both are now hoisted/shared.
    """
    if not ingredient_files:
        return None, None, "Upload ingredient images."
    if not api_key or not api_key.strip():
        return None, None, "API key required."

    # Load ingredient images; gradio may supply file objects or bare paths.
    ingredients = []
    for f in ingredient_files:
        try:
            path = f.name if hasattr(f, 'name') else f
            ingredients.append(Image.open(path).convert("RGB"))
        except Exception as e:
            return None, None, "Error loading: " + str(e)

    if len(ingredients) < 2:
        return None, None, "Need at least 2 ingredient images."

    # Normalize every ingredient to the first image's resolution.
    target = ingredients[0].size
    ingredients = [img.resize(target, Image.LANCZOS) for img in ingredients]

    pkey = img_display_to_key(provider_name)
    prov = get_provider(pkey, api_key.strip())

    all_frames = [ingredients[0]]
    total_pairs = len(ingredients) - 1
    ntf = int(num_transition_frames)

    for pair_idx in range(total_pairs):
        progress(pair_idx / total_pairs * 0.7,
                 desc="Transitions " + str(pair_idx + 1) + "/" + str(total_pairs))

        img_a = ingredients[pair_idx]
        img_b = ingredients[pair_idx + 1]

        # Hoisted out of the per-frame loop: these conversions are invariant
        # for the pair and were previously recomputed for every frame.
        arr_a = np.array(img_a).astype(np.float32)
        arr_b = np.array(img_b).astype(np.float32)

        def _blend(frac):
            # Linear cross-fade between the pair at position frac in (0, 1).
            return Image.fromarray(((1 - frac) * arr_a + frac * arr_b).astype(np.uint8))

        for t in range(1, ntf + 1):
            blend_img = _blend(t / (ntf + 1))

            if not prov.supports_img2img:
                # Pure cross-fade fallback for providers without img2img.
                all_frames.append(blend_img)
                continue

            trans_prompt = story_prompt or "smooth transition between scenes"
            trans_prompt += ", transition frame " + str(t) + " of " + str(ntf)
            try:
                gen_img = prov.img2img(
                    prompt=trans_prompt, image=blend_img,
                    strength=float(strength),
                    seed=(int(seed) + pair_idx * 100 + t) if seed else None,
                    model=model)
                all_frames.append(gen_img)
            except Exception:
                # Best-effort: keep the plain blend if the API call fails.
                all_frames.append(blend_img)

        all_frames.append(img_b)

    # Interpolate for smoothness.
    mult = int(interp_mult)
    progress(0.75, desc="Smoothing...")
    if mult > 1:
        smooth = interpolator.interpolate_sequence(all_frames, mult, "blend")
    else:
        smooth = all_frames

    progress(0.85, desc="Assembling...")
    # Shorten the per-frame hold when interpolating so runtime stays stable.
    adj = float(hold_sec) / max(mult, 1) if mult > 1 else float(hold_sec)
    vpath = assembler.create_video(
        smooth, fps=int(fps), hold_seconds=adj,
        add_labels=False, add_progress=True, add_bookend_labels=True)

    if music_file:
        track = music_file.name if hasattr(music_file, 'name') else music_file
        vpath = assembler.add_audio_to_video(vpath, track)

    comp = assembler.create_comparison_image(ingredients[0], ingredients[-1])
    progress(1.0)
    return vpath, comp, "Video from " + str(len(ingredients)) + " ingredients, " + str(len(smooth)) + " frames!"
|
| 527 |
|
| 528 |
|
| 529 |
# ===============================================
|
|
|
|
| 531 |
# ===============================================
|
| 532 |
|
| 533 |
# Markdown banner rendered at the top of the app UI.
HEADER = "\n".join([
    "# TimeLapseForge v3.0 -- Full Creative Studio",
    "### Timelapse | Text-to-Image | Image-to-Image | Text-to-Video | Image-to-Video "
    "| Frames-to-Video | Ingredients-to-Video",
    "",
    "**13+ Image APIs** + **7 Video APIs** -- Use YOUR API keys",
])
|
| 539 |
|
| 540 |
# Markdown help text for the "API Keys Guide" tab: where to obtain a key for
# each supported image/video provider and which optional package it needs.
API_HELP = """### Image Generation APIs

| Provider | Key | Extra Package? |
|---|---|---|
| Stability AI | [Get Key](https://platform.stability.ai/account/keys) | No |
| HuggingFace | [Get Key](https://huggingface.co/settings/tokens) | No |
| Fireworks AI | [Get Key](https://fireworks.ai/account/api-keys) | No |
| Ideogram | [Get Key](https://ideogram.ai/manage-api) | No |
| Leonardo | [Get Key](https://app.leonardo.ai/api-access) | No |
| OpenAI | [Get Key](https://platform.openai.com/api-keys) | openai |
| Replicate | [Get Key](https://replicate.com/account/api-tokens) | replicate |
| Together AI | [Get Key](https://api.together.xyz/settings/api-keys) | together |
| Fal.ai | [Get Key](https://fal.ai/dashboard/keys) | fal-client |
| Google Gemini | [Get Key](https://aistudio.google.com/apikey) | google-generativeai |

### Video Generation APIs

| Provider | Key | Extra Package? |
|---|---|---|
| Stability Video | [Get Key](https://platform.stability.ai/account/keys) | No |
| Runway | [Get Key](https://dev.runwayml.com/) | No |
| Luma | [Get Key](https://lumalabs.ai/api) | No |
| MiniMax | [Get Key](https://platform.minimaxi.com/) | No |
| HuggingFace Video | [Get Key](https://huggingface.co/settings/tokens) | No |
| Replicate Video | [Get Key](https://replicate.com/account/api-tokens) | replicate |
| Fal.ai Video | [Get Key](https://fal.ai/dashboard/keys) | fal-client |
"""
|
| 563 |
|
| 564 |
|
| 565 |
with gr.Blocks(
|
| 566 |
+
title="TimeLapseForge v3.0",
|
| 567 |
theme=gr.themes.Soft(primary_hue="emerald", secondary_hue="blue"),
|
| 568 |
) as app:
|
| 569 |
|
|
|
|
| 574 |
|
| 575 |
with gr.Tabs():
|
| 576 |
|
| 577 |
+
# ==============================
|
| 578 |
+
# TAB: TIMELAPSE PIPELINE
|
| 579 |
+
# ==============================
|
| 580 |
+
with gr.Tab("Timelapse Pipeline"):
|
| 581 |
+
with gr.Tabs():
|
| 582 |
+
with gr.Tab("1. Parse JSON"):
|
| 583 |
+
with gr.Row():
|
| 584 |
+
with gr.Column(scale=2):
|
| 585 |
+
tl_json = gr.Textbox(label="Paste GPT JSON", lines=12)
|
| 586 |
+
with gr.Column(scale=1):
|
| 587 |
+
gr.Markdown("**-- OR -- Quick Text:**")
|
| 588 |
+
tl_quick = gr.Textbox(label="Quick Command", lines=2,
|
| 589 |
+
placeholder="Restore a rusty 1969 Camaro SS")
|
| 590 |
+
tl_panels = gr.Slider(minimum=8, maximum=40, value=20, step=1, label="Panels")
|
| 591 |
+
tl_mode = gr.Dropdown(choices=["restoration", "creation"], value="restoration", label="Mode")
|
| 592 |
+
tl_parse_btn = gr.Button("Parse and Preview", variant="primary", size="lg")
|
| 593 |
+
tl_parse_status = gr.Markdown("")
|
| 594 |
+
tl_table = gr.Dataframe(headers=["ID", "Phase", "Title", "Preview"], visible=False, wrap=True)
|
| 595 |
+
|
| 596 |
+
with gr.Tab("2. Generate"):
|
| 597 |
+
with gr.Row():
|
| 598 |
+
with gr.Column(scale=1):
|
| 599 |
+
tl_gen_mode = gr.Radio(choices=["Local (Free GPU)", "API (Your Key)"],
|
| 600 |
+
value="API (Your Key)", label="Mode")
|
| 601 |
+
with gr.Group(visible=False) as tl_local_grp:
|
| 602 |
+
tl_local_model = gr.Dropdown(choices=list(LOCAL_MODELS.keys()),
|
| 603 |
+
value="SDXL Turbo (Fast)", label="Local Model")
|
| 604 |
+
with gr.Group(visible=True) as tl_api_grp:
|
| 605 |
+
tl_img_prov = gr.Dropdown(choices=IMG_PROVIDER_CHOICES,
|
| 606 |
+
value=IMG_PROVIDER_CHOICES[0], label="Provider")
|
| 607 |
+
tl_api_key = gr.Textbox(label="API Key", type="password")
|
| 608 |
+
tl_api_model = gr.Dropdown(choices=["dall-e-3"], value="dall-e-3",
|
| 609 |
+
label="Model", allow_custom_value=True)
|
| 610 |
+
with gr.Accordion("Custom API", open=False):
|
| 611 |
+
tl_curl = gr.Textbox(label="Custom Base URL")
|
| 612 |
+
tl_eurl = gr.Textbox(label="Direct Endpoint URL")
|
| 613 |
+
tl_strength = gr.Slider(minimum=0.15, maximum=0.70, value=0.38, step=0.01,
|
| 614 |
+
label="Change Strength")
|
| 615 |
+
tl_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 616 |
+
with gr.Row():
|
| 617 |
+
tl_steps = gr.Slider(minimum=0, maximum=50, value=0, step=1, label="Steps (0=auto)")
|
| 618 |
+
tl_cfg = gr.Slider(minimum=-1, maximum=15, value=-1, step=0.5, label="CFG (-1=auto)")
|
| 619 |
+
with gr.Row():
|
| 620 |
+
tl_w = gr.Number(value=1024, label="Width", precision=0)
|
| 621 |
+
tl_h = gr.Number(value=1024, label="Height", precision=0)
|
| 622 |
+
tl_ref = gr.Image(label="Reference Image", type="pil")
|
| 623 |
+
with gr.Column(scale=3):
|
| 624 |
+
tl_gen_btn = gr.Button("Generate All Panels", variant="primary", size="lg")
|
| 625 |
+
tl_gen_status = gr.Markdown("")
|
| 626 |
+
tl_gallery = gr.Gallery(label="Panels", columns=5, rows=3, height=400, object_fit="contain")
|
| 627 |
+
with gr.Accordion("Regenerate Single", open=False):
|
| 628 |
+
with gr.Row():
|
| 629 |
+
tl_regen_num = gr.Number(value=1, label="Panel #", precision=0)
|
| 630 |
+
tl_regen_btn = gr.Button("Regenerate", variant="secondary")
|
| 631 |
+
tl_regen_status = gr.Markdown("")
|
| 632 |
+
|
| 633 |
+
with gr.Tab("3. Assemble Video"):
|
| 634 |
+
with gr.Row():
|
| 635 |
+
with gr.Column(scale=1):
|
| 636 |
+
tl_interp = gr.Slider(minimum=1, maximum=8, value=3, step=1, label="Frame Multiplier")
|
| 637 |
+
tl_interp_m = gr.Dropdown(choices=["blend", "flow"], value="blend", label="Method")
|
| 638 |
+
tl_fps = gr.Slider(minimum=12, maximum=60, value=24, step=1, label="FPS")
|
| 639 |
+
tl_hold = gr.Slider(minimum=0.3, maximum=5.0, value=1.5, step=0.1, label="Sec/Panel")
|
| 640 |
+
tl_labels = gr.Checkbox(value=True, label="Timestamps")
|
| 641 |
+
tl_prog = gr.Checkbox(value=True, label="Progress bar")
|
| 642 |
+
tl_book = gr.Checkbox(value=True, label="BEFORE/AFTER")
|
| 643 |
+
tl_music = gr.File(label="Music", file_types=[".mp3", ".wav", ".ogg"])
|
| 644 |
+
tl_gif = gr.Checkbox(value=False, label="Export GIF")
|
| 645 |
+
with gr.Column(scale=2):
|
| 646 |
+
tl_asm_btn = gr.Button("Create Video", variant="primary", size="lg")
|
| 647 |
+
tl_asm_status = gr.Markdown("")
|
| 648 |
+
tl_video = gr.Video(label="Timelapse Video")
|
| 649 |
+
tl_comp = gr.Image(label="Before/After")
|
| 650 |
+
tl_gif_out = gr.File(label="GIF")
|
| 651 |
+
|
| 652 |
+
# ==============================
|
| 653 |
+
# TAB: TEXT TO IMAGE
|
| 654 |
+
# ==============================
|
| 655 |
+
with gr.Tab("Text to Image"):
|
| 656 |
with gr.Row():
|
| 657 |
+
with gr.Column(scale=1):
|
| 658 |
+
t2i_prov = gr.Dropdown(choices=IMG_PROVIDER_CHOICES, value=IMG_PROVIDER_CHOICES[0], label="Provider")
|
| 659 |
+
t2i_key = gr.Textbox(label="API Key", type="password")
|
| 660 |
+
t2i_model = gr.Dropdown(choices=["dall-e-3"], value="dall-e-3",
|
| 661 |
+
label="Model", allow_custom_value=True)
|
| 662 |
+
t2i_prompt = gr.Textbox(label="Prompt", lines=4, placeholder="A futuristic city at sunset...")
|
| 663 |
+
t2i_neg = gr.Textbox(label="Negative Prompt", lines=2, placeholder="blurry, low quality")
|
| 664 |
+
with gr.Row():
|
| 665 |
+
t2i_w = gr.Number(value=1024, label="Width", precision=0)
|
| 666 |
+
t2i_h = gr.Number(value=1024, label="Height", precision=0)
|
| 667 |
+
t2i_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 668 |
with gr.Column(scale=2):
|
| 669 |
+
t2i_btn = gr.Button("Generate Image", variant="primary", size="lg")
|
| 670 |
+
t2i_status = gr.Markdown("")
|
| 671 |
+
t2i_output = gr.Image(label="Generated Image", type="pil")
|
| 672 |
+
|
| 673 |
+
# ==============================
|
| 674 |
+
# TAB: IMAGE TO IMAGE
|
| 675 |
+
# ==============================
|
| 676 |
+
with gr.Tab("Image to Image"):
|
| 677 |
+
with gr.Row():
|
| 678 |
with gr.Column(scale=1):
|
| 679 |
+
i2i_prov = gr.Dropdown(choices=IMG_PROVIDER_CHOICES, value=IMG_PROVIDER_CHOICES[0], label="Provider")
|
| 680 |
+
i2i_key = gr.Textbox(label="API Key", type="password")
|
| 681 |
+
i2i_model = gr.Dropdown(choices=["dall-e-3"], value="dall-e-3",
|
| 682 |
+
label="Model", allow_custom_value=True)
|
| 683 |
+
i2i_source = gr.Image(label="Source Image", type="pil")
|
| 684 |
+
i2i_prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Transform into oil painting style...")
|
| 685 |
+
i2i_neg = gr.Textbox(label="Negative Prompt", lines=2)
|
| 686 |
+
i2i_strength = gr.Slider(minimum=0.1, maximum=0.9, value=0.5, step=0.05, label="Strength")
|
| 687 |
+
i2i_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 688 |
+
with gr.Column(scale=2):
|
| 689 |
+
i2i_btn = gr.Button("Transform Image", variant="primary", size="lg")
|
| 690 |
+
i2i_status = gr.Markdown("")
|
| 691 |
+
i2i_output = gr.Image(label="Result", type="pil")
|
| 692 |
+
|
| 693 |
+
# ==============================
|
| 694 |
+
# TAB: TEXT TO VIDEO
|
| 695 |
+
# ==============================
|
| 696 |
+
with gr.Tab("Text to Video"):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 697 |
with gr.Row():
|
| 698 |
with gr.Column(scale=1):
|
| 699 |
+
t2v_prov = gr.Dropdown(choices=VID_PROVIDER_CHOICES, value=VID_PROVIDER_CHOICES[0], label="Provider")
|
| 700 |
+
t2v_key = gr.Textbox(label="API Key", type="password")
|
| 701 |
+
t2v_model = gr.Dropdown(choices=["auto"], value="auto",
|
| 702 |
+
label="Model", allow_custom_value=True)
|
| 703 |
+
t2v_prompt = gr.Textbox(label="Prompt", lines=4,
|
| 704 |
+
placeholder="A drone shot flying over a mountain valley at sunrise...")
|
| 705 |
+
t2v_dur = gr.Slider(minimum=2, maximum=10, value=5, step=1, label="Duration (sec)")
|
| 706 |
+
t2v_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 707 |
+
with gr.Column(scale=2):
|
| 708 |
+
t2v_btn = gr.Button("Generate Video", variant="primary", size="lg")
|
| 709 |
+
t2v_status = gr.Markdown("")
|
| 710 |
+
t2v_output = gr.Video(label="Generated Video")
|
| 711 |
+
|
| 712 |
+
# ==============================
|
| 713 |
+
# TAB: IMAGE TO VIDEO
|
| 714 |
+
# ==============================
|
| 715 |
+
with gr.Tab("Image to Video"):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 716 |
with gr.Row():
|
| 717 |
with gr.Column(scale=1):
|
| 718 |
+
i2v_prov = gr.Dropdown(choices=VID_PROVIDER_CHOICES, value=VID_PROVIDER_CHOICES[0], label="Provider")
|
| 719 |
+
i2v_key = gr.Textbox(label="API Key", type="password")
|
| 720 |
+
i2v_model = gr.Dropdown(choices=["auto"], value="auto",
|
| 721 |
+
label="Model", allow_custom_value=True)
|
| 722 |
+
i2v_source = gr.Image(label="Source Image", type="pil")
|
| 723 |
+
i2v_prompt = gr.Textbox(label="Motion Prompt (optional)", lines=3,
|
| 724 |
+
placeholder="Camera slowly zooms in, leaves rustling...")
|
| 725 |
+
i2v_dur = gr.Slider(minimum=2, maximum=10, value=4, step=1, label="Duration (sec)")
|
| 726 |
+
i2v_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 727 |
+
with gr.Column(scale=2):
|
| 728 |
+
i2v_btn = gr.Button("Animate Image", variant="primary", size="lg")
|
| 729 |
+
i2v_status = gr.Markdown("")
|
| 730 |
+
i2v_output = gr.Video(label="Generated Video")
|
| 731 |
+
|
| 732 |
+
# ==============================
|
| 733 |
+
# TAB: FRAMES TO VIDEO
|
| 734 |
+
# ==============================
|
| 735 |
+
with gr.Tab("Frames to Video"):
|
| 736 |
+
with gr.Row():
|
| 737 |
+
with gr.Column(scale=1):
|
| 738 |
+
f2v_files = gr.File(label="Upload Frames (in order)",
|
| 739 |
+
file_count="multiple",
|
| 740 |
+
file_types=[".png", ".jpg", ".jpeg", ".webp"])
|
| 741 |
+
f2v_fps = gr.Slider(minimum=12, maximum=60, value=24, step=1, label="FPS")
|
| 742 |
+
f2v_hold = gr.Slider(minimum=0.3, maximum=5.0, value=1.5, step=0.1, label="Sec/Frame")
|
| 743 |
+
f2v_interp = gr.Slider(minimum=1, maximum=8, value=3, step=1, label="Frame Multiplier")
|
| 744 |
+
f2v_method = gr.Dropdown(choices=["blend", "flow"], value="blend", label="Interpolation")
|
| 745 |
+
f2v_prog = gr.Checkbox(value=True, label="Progress bar")
|
| 746 |
+
f2v_book = gr.Checkbox(value=True, label="BEFORE/AFTER")
|
| 747 |
+
f2v_music = gr.File(label="Music", file_types=[".mp3", ".wav", ".ogg"])
|
| 748 |
+
with gr.Column(scale=2):
|
| 749 |
+
f2v_btn = gr.Button("Create Video from Frames", variant="primary", size="lg")
|
| 750 |
+
f2v_status = gr.Markdown("")
|
| 751 |
+
f2v_output = gr.Video(label="Video")
|
| 752 |
+
|
| 753 |
+
# ==============================
|
| 754 |
+
# TAB: INGREDIENTS TO VIDEO
|
| 755 |
+
# ==============================
|
| 756 |
+
with gr.Tab("Ingredients to Video"):
|
| 757 |
+
gr.Markdown(
|
| 758 |
+
"Upload multiple **ingredient images** (key scenes/objects). "
|
| 759 |
+
"AI generates smooth transition frames between them to create a cohesive video."
|
| 760 |
+
)
|
| 761 |
+
with gr.Row():
|
| 762 |
+
with gr.Column(scale=1):
|
| 763 |
+
ing_files = gr.File(label="Ingredient Images (in order)",
|
| 764 |
+
file_count="multiple",
|
| 765 |
+
file_types=[".png", ".jpg", ".jpeg", ".webp"])
|
| 766 |
+
ing_prompt = gr.Textbox(label="Story/Transition Prompt", lines=3,
|
| 767 |
+
placeholder="Smooth cinematic transition between scenes...")
|
| 768 |
+
ing_prov = gr.Dropdown(choices=IMG_PROVIDER_CHOICES, value=IMG_PROVIDER_CHOICES[0], label="Provider")
|
| 769 |
+
ing_key = gr.Textbox(label="API Key", type="password")
|
| 770 |
+
ing_model = gr.Dropdown(choices=["auto"], value="auto",
|
| 771 |
+
label="Model", allow_custom_value=True)
|
| 772 |
+
ing_trans = gr.Slider(minimum=2, maximum=10, value=4, step=1,
|
| 773 |
+
label="Transition Frames per Pair")
|
| 774 |
+
ing_strength = gr.Slider(minimum=0.2, maximum=0.7, value=0.4, step=0.05,
|
| 775 |
+
label="AI Strength")
|
| 776 |
+
ing_seed = gr.Number(value=42, label="Seed", precision=0)
|
| 777 |
+
ing_fps = gr.Slider(minimum=12, maximum=60, value=24, step=1, label="FPS")
|
| 778 |
+
ing_hold = gr.Slider(minimum=0.3, maximum=3.0, value=1.0, step=0.1, label="Sec/Frame")
|
| 779 |
+
ing_interp = gr.Slider(minimum=1, maximum=6, value=3, step=1, label="Smooth Multiplier")
|
| 780 |
+
ing_music = gr.File(label="Music", file_types=[".mp3", ".wav", ".ogg"])
|
| 781 |
with gr.Column(scale=2):
|
| 782 |
+
ing_btn = gr.Button("Create Ingredients Video", variant="primary", size="lg")
|
| 783 |
+
ing_status = gr.Markdown("")
|
| 784 |
+
ing_video = gr.Video(label="Video")
|
| 785 |
+
ing_comp = gr.Image(label="First vs Last")
|
| 786 |
+
|
| 787 |
+
# ==============================
|
| 788 |
+
# TAB: API KEYS GUIDE
|
| 789 |
+
# ==============================
|
|
|
|
| 790 |
with gr.Tab("API Keys Guide"):
|
| 791 |
gr.Markdown(API_HELP)
|
| 792 |
|
| 793 |
+
# ==============================
|
| 794 |
+
# DYNAMIC UI
|
| 795 |
+
# ==============================
|
| 796 |
|
| 797 |
+
def toggle_tl_mode(mode):
|
| 798 |
if mode == "Local (Free GPU)":
|
| 799 |
return gr.update(visible=True), gr.update(visible=False)
|
| 800 |
return gr.update(visible=False), gr.update(visible=True)
|
| 801 |
|
| 802 |
+
tl_gen_mode.change(fn=toggle_tl_mode, inputs=[tl_gen_mode], outputs=[tl_local_grp, tl_api_grp])
|
| 803 |
+
tl_img_prov.change(fn=update_img_models, inputs=[tl_img_prov], outputs=[tl_api_model])
|
| 804 |
+
t2i_prov.change(fn=update_img_models, inputs=[t2i_prov], outputs=[t2i_model])
|
| 805 |
+
i2i_prov.change(fn=update_img_models, inputs=[i2i_prov], outputs=[i2i_model])
|
| 806 |
+
t2v_prov.change(fn=update_vid_models, inputs=[t2v_prov], outputs=[t2v_model])
|
| 807 |
+
i2v_prov.change(fn=update_vid_models, inputs=[i2v_prov], outputs=[i2v_model])
|
| 808 |
+
ing_prov.change(fn=update_img_models, inputs=[ing_prov], outputs=[ing_model])
|
| 809 |
+
|
| 810 |
+
# ==============================
|
| 811 |
+
# EVENTS
|
| 812 |
+
# ==============================
|
| 813 |
+
|
| 814 |
+
# Timelapse
|
| 815 |
+
tl_parse_btn.click(fn=parse_input,
|
| 816 |
+
inputs=[tl_json, tl_quick, tl_panels, tl_mode],
|
| 817 |
+
outputs=[tl_parse_status, tl_table, parsed_json_state, tl_table])
|
| 818 |
+
|
| 819 |
+
tl_gen_btn.click(fn=generate_panels,
|
| 820 |
+
inputs=[parsed_json_state, tl_gen_mode, tl_local_model, tl_img_prov,
|
| 821 |
+
tl_api_key, tl_api_model, tl_curl, tl_eurl,
|
| 822 |
+
tl_strength, tl_seed, tl_steps, tl_cfg, tl_w, tl_h, tl_ref],
|
| 823 |
+
outputs=[tl_gallery, images_state, tl_gen_status])
|
| 824 |
+
|
| 825 |
+
tl_regen_btn.click(fn=regenerate_single,
|
| 826 |
+
inputs=[tl_regen_num, parsed_json_state, images_state,
|
| 827 |
+
tl_gen_mode, tl_local_model, tl_img_prov, tl_api_key,
|
| 828 |
+
tl_api_model, tl_curl, tl_eurl, tl_strength, tl_seed],
|
| 829 |
+
outputs=[images_state, tl_gallery, tl_regen_status])
|
| 830 |
+
|
| 831 |
+
tl_asm_btn.click(fn=interpolate_and_assemble,
|
| 832 |
+
inputs=[images_state, parsed_json_state, tl_interp, tl_interp_m,
|
| 833 |
+
tl_fps, tl_hold, tl_labels, tl_prog, tl_book, tl_music, tl_gif],
|
| 834 |
+
outputs=[tl_video, tl_comp, tl_gif_out, tl_asm_status])
|
| 835 |
+
|
| 836 |
+
# Text to Image
|
| 837 |
+
t2i_btn.click(fn=do_text_to_image,
|
| 838 |
+
inputs=[t2i_prompt, t2i_neg, t2i_prov, t2i_key, t2i_model, t2i_w, t2i_h, t2i_seed],
|
| 839 |
+
outputs=[t2i_output, t2i_status])
|
| 840 |
+
|
| 841 |
+
# Image to Image
|
| 842 |
+
i2i_btn.click(fn=do_image_to_image,
|
| 843 |
+
inputs=[i2i_source, i2i_prompt, i2i_neg, i2i_prov, i2i_key,
|
| 844 |
+
i2i_model, i2i_strength, i2i_seed],
|
| 845 |
+
outputs=[i2i_output, i2i_status])
|
| 846 |
+
|
| 847 |
+
# Text to Video
|
| 848 |
+
t2v_btn.click(fn=do_text_to_video,
|
| 849 |
+
inputs=[t2v_prompt, t2v_prov, t2v_key, t2v_model, t2v_dur, t2v_seed],
|
| 850 |
+
outputs=[t2v_output, t2v_status])
|
| 851 |
+
|
| 852 |
+
# Image to Video
|
| 853 |
+
i2v_btn.click(fn=do_image_to_video,
|
| 854 |
+
inputs=[i2v_source, i2v_prompt, i2v_prov, i2v_key, i2v_model, i2v_dur, i2v_seed],
|
| 855 |
+
outputs=[i2v_output, i2v_status])
|
| 856 |
+
|
| 857 |
+
# Frames to Video
|
| 858 |
+
f2v_btn.click(fn=do_frames_to_video,
|
| 859 |
+
inputs=[f2v_files, f2v_fps, f2v_hold, f2v_interp, f2v_method,
|
| 860 |
+
f2v_prog, f2v_book, f2v_music],
|
| 861 |
+
outputs=[f2v_output, f2v_status])
|
| 862 |
+
|
| 863 |
+
# Ingredients to Video
|
| 864 |
+
ing_btn.click(fn=do_ingredients_to_video,
|
| 865 |
+
inputs=[ing_files, ing_prompt, ing_prov, ing_key, ing_model,
|
| 866 |
+
ing_trans, ing_strength, ing_seed,
|
| 867 |
+
ing_fps, ing_hold, ing_interp, ing_music],
|
| 868 |
+
outputs=[ing_video, ing_comp, ing_status])
|
| 869 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 870 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 871 |
# ===============================================
|
| 872 |
if __name__ == "__main__":
|
| 873 |
app.launch(
|