Spaces:
Running on Zero
Running on Zero
Upload 11 files
Browse files- README.md +25 -34
- ai_runtime.py +302 -0
- app.py +211 -178
- fallback_generator.py +409 -0
- llm_parser.py +167 -0
- model_runtime.py +54 -0
- packages.txt +4 -0
- parser.py +148 -0
- requirements.txt +12 -0
- viewer.py +158 -0
README.md
CHANGED
|
@@ -1,54 +1,45 @@
|
|
| 1 |
---
|
| 2 |
title: Particle Blueprint 3D
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: indigo
|
| 5 |
colorTo: blue
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.24.0
|
| 8 |
app_file: app.py
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
---
|
| 12 |
|
| 13 |
# Particle Blueprint 3D
|
| 14 |
|
| 15 |
-
Mobile-first
|
| 16 |
|
| 17 |
-
## What this
|
| 18 |
|
| 19 |
-
-
|
| 20 |
-
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
-
|
| 25 |
-
- caches **`tencent/Hunyuan3D-Omni`** during the mesh step so the Space is prepared for deeper target-model integration later
|
| 26 |
|
| 27 |
-
##
|
| 28 |
|
| 29 |
-
This build
|
| 30 |
|
| 31 |
-
|
| 32 |
-
- **Mesh step** is real and working
|
| 33 |
-
- **Hunyuan3D-Omni cache prep** is wired in
|
| 34 |
-
- the final mesh generation still uses the app's local voxel mesher for reliability
|
| 35 |
|
| 36 |
-
|
| 37 |
|
| 38 |
-
##
|
| 39 |
|
| 40 |
-
|
| 41 |
-
- `generator.py`
|
| 42 |
-
- `model_runtime.py`
|
| 43 |
-
- `llm_parser.py`
|
| 44 |
-
- `parser.py`
|
| 45 |
-
- `requirements.txt`
|
| 46 |
-
- `packages.txt`
|
| 47 |
-
- `README.md`
|
| 48 |
-
- `LICENSE`
|
| 49 |
|
| 50 |
-
|
|
|
|
|
|
|
| 51 |
|
| 52 |
-
|
| 53 |
-
- first run with local planner models can take longer because weights need to download
|
| 54 |
-
- the 3D viewers are tuned for touch on iPhone
|
|
|
|
| 1 |
---
|
| 2 |
title: Particle Blueprint 3D
|
| 3 |
+
emoji: 🧊
|
| 4 |
colorFrom: indigo
|
| 5 |
colorTo: blue
|
| 6 |
sdk: gradio
|
|
|
|
| 7 |
app_file: app.py
|
| 8 |
+
python_version: "3.10"
|
| 9 |
+
startup_duration_timeout: 1h
|
| 10 |
+
preload_from_hub:
|
| 11 |
+
- tencent/Hunyuan3D-1
|
| 12 |
+
- Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled
|
| 13 |
+
- tencent/Hunyuan3D-Omni
|
| 14 |
---
|
| 15 |
|
| 16 |
# Particle Blueprint 3D
|
| 17 |
|
| 18 |
+
Mobile-first 3D concept workflow for Hugging Face Spaces.
|
| 19 |
|
| 20 |
+
## What this build does
|
| 21 |
|
| 22 |
+
- takes a text prompt
|
| 23 |
+
- tries to run **Hunyuan3D-1** for prompt-to-3D generation
|
| 24 |
+
- converts the AI result into a **particle blueprint** for inspection
|
| 25 |
+
- lets the user review that blueprint on iPhone first
|
| 26 |
+
- exports the mesh as a **GLB** when approved
|
| 27 |
+
- preloads **Hunyuan3D-Omni** into the Space cache for the next stage of controllable refinement work
|
|
|
|
| 28 |
|
| 29 |
+
## Why this shape
|
| 30 |
|
| 31 |
+
This build is aimed at a scaffold-first workflow rather than a one-shot "magic GLB" workflow. The blueprint is treated as the inspection layer between prompt and final asset.
|
| 32 |
|
| 33 |
+
## Important note
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
+
This repo is rebuilt around the official Hugging Face-hosted Tencent model weights, but the second-stage **Hunyuan3D-Omni** refinement command is still marked experimental in this build. The current mesh export step returns the Hunyuan3D-1 AI mesh normalized to GLB while also caching Omni for future control-driven refinement.
|
| 36 |
|
| 37 |
+
## First-run expectations
|
| 38 |
|
| 39 |
+
The first run can take a while because the Space may need to:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
+
1. cache the large model repos
|
| 42 |
+
2. clone the upstream Tencent code repos
|
| 43 |
+
3. run the heavier text-to-3D stack
|
| 44 |
|
| 45 |
+
If the Hunyuan path fails because the upstream repo still needs extra GPU-side dependencies in your runtime, the app falls back to the local scaffold builder so the UI still works.
|
|
|
|
|
|
ai_runtime.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import shutil
|
| 5 |
+
import subprocess
|
| 6 |
+
import sys
|
| 7 |
+
import tempfile
|
| 8 |
+
import zipfile
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Callable, Generator
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import requests
|
| 15 |
+
import trimesh
|
| 16 |
+
from huggingface_hub import snapshot_download
|
| 17 |
+
|
| 18 |
+
from viewer import point_cloud_viewer_html, load_points_from_mesh_file
|
| 19 |
+
|
| 20 |
+
MODEL_TEXT3D = "tencent/Hunyuan3D-1"
|
| 21 |
+
MODEL_TEXT2IMAGE = "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled"
|
| 22 |
+
MODEL_OMNI = "tencent/Hunyuan3D-Omni"
|
| 23 |
+
|
| 24 |
+
REPO_TEXT3D = "https://github.com/Tencent-Hunyuan/Hunyuan3D-1.git"
|
| 25 |
+
REPO_OMNI = "https://github.com/Tencent-Hunyuan/Hunyuan3D-Omni.git"
|
| 26 |
+
REPO_TEXT3D_ZIP = "https://github.com/Tencent-Hunyuan/Hunyuan3D-1/archive/refs/heads/main.zip"
|
| 27 |
+
REPO_OMNI_ZIP = "https://github.com/Tencent-Hunyuan/Hunyuan3D-Omni/archive/refs/heads/main.zip"
|
| 28 |
+
|
| 29 |
+
BASE_CACHE = Path(os.getenv("PB3D_CACHE_ROOT", "/data/pb3d_cache" if Path("/data").exists() else "./pb3d_cache"))
|
| 30 |
+
REPOS_DIR = BASE_CACHE / "repos"
|
| 31 |
+
MODELS_DIR = BASE_CACHE / "models"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class AiBlueprintSession:
    """Artifacts produced by one AI blueprint generation run.

    Every path is stored as a string so the record can round-trip through
    Gradio session state, which serialises plain dicts.
    """

    session_dir: str        # temp directory holding all artifacts of this run
    blueprint_path: str     # sampled point-cloud blueprint (.ply)
    raw_ai_mesh_path: str   # mesh exactly as produced by the AI pipeline
    preview_glb_path: str   # normalized GLB used for the in-app preview
    source_model: str       # HF repo id of the model that produced the mesh
    point_count: int        # number of points in the blueprint sample
    prompt: str             # user prompt that drove the generation

    def to_state(self) -> dict:
        """Return a plain-dict snapshot of this session for Gradio state."""
        field_names = (
            "session_dir",
            "blueprint_path",
            "raw_ai_mesh_path",
            "preview_glb_path",
            "source_model",
            "point_count",
            "prompt",
        )
        return {name: getattr(self, name) for name in field_names}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def ensure_cache_home() -> Path:
    """Create the cache directory tree and return its root.

    On Spaces with persistent storage mounted at /data, this also points
    HF_HOME there so Hugging Face downloads survive restarts.

    Returns:
        Path: ``BASE_CACHE``, with the ``repos/`` and ``models/``
        subdirectories guaranteed to exist.
    """
    if Path("/data").exists():
        # setdefault: respect an HF_HOME the operator configured explicitly.
        os.environ.setdefault("HF_HOME", "/data/.huggingface")
    BASE_CACHE.mkdir(parents=True, exist_ok=True)
    REPOS_DIR.mkdir(parents=True, exist_ok=True)
    MODELS_DIR.mkdir(parents=True, exist_ok=True)
    return BASE_CACHE
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _download_repo_zip(zip_url: str, dest_root: Path) -> Path:
    """Download a GitHub branch zip archive and unpack it as *dest_root*.

    Used as a fallback when ``git clone`` is unavailable or fails.  The
    archive is streamed to disk in chunks (repo zips can be hundreds of MB,
    so buffering the whole response in memory is avoided) and the temporary
    zip file is always removed afterwards.

    Args:
        zip_url: URL of the repository zip (GitHub archive endpoint).
        dest_root: Final checkout directory; replaced if it already exists.

    Returns:
        *dest_root*, now containing the unpacked repository.

    Raises:
        requests.HTTPError: if the download fails.
        RuntimeError: if no extracted directory matching the GitHub
            ``<name>-*`` naming convention is found after unpacking.
    """
    dest_root.parent.mkdir(parents=True, exist_ok=True)
    archive_path = dest_root.parent / f"{dest_root.name}.zip"
    try:
        # Stream the archive to disk instead of holding it all in memory.
        with requests.get(zip_url, timeout=120, stream=True) as resp:
            resp.raise_for_status()
            with archive_path.open("wb") as fh:
                for chunk in resp.iter_content(chunk_size=1 << 20):
                    fh.write(chunk)
        with zipfile.ZipFile(archive_path, "r") as zf:
            zf.extractall(dest_root.parent)
    finally:
        # The zip is only an intermediate artifact; never leave it in the
        # persistent cache.
        archive_path.unlink(missing_ok=True)
    # GitHub archives unpack to "<repo>-<branch>", e.g. "Hunyuan3D-1-main".
    extracted = next(dest_root.parent.glob(f"{dest_root.name}-*"), None)
    if extracted is None:
        raise RuntimeError(f"Could not unpack {zip_url}")
    if dest_root.exists():
        shutil.rmtree(dest_root)
    extracted.rename(dest_root)
    return dest_root
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def ensure_repo_checkout(name: str, repo_url: str, zip_url: str) -> Path:
    """Ensure a code checkout of *repo_url* exists under the repos cache.

    Tries a shallow ``git clone`` first and falls back to downloading the
    zip archive when git is missing or the clone fails.

    Args:
        name: Directory name for the checkout under ``REPOS_DIR``.
        repo_url: Git remote URL.
        zip_url: Archive URL used as a clone fallback.

    Returns:
        Path to the checkout directory.
    """
    ensure_cache_home()
    dest = REPOS_DIR / name
    # A non-empty directory counts as a usable checkout.  An empty leftover
    # from an interrupted clone is removed and re-fetched instead of being
    # returned as if it were valid.  (The previous `.git` check was
    # redundant: dest/.git existing implies dest exists.)
    if dest.exists():
        if any(dest.iterdir()):
            return dest
        shutil.rmtree(dest)

    try:
        subprocess.run(
            ["git", "clone", "--depth", "1", repo_url, str(dest)],
            check=True,
            capture_output=True,
            text=True,
        )
        return dest
    except Exception:
        # git missing, network blocked, or clone rejected — use the zip.
        return _download_repo_zip(zip_url, dest)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def ensure_model_snapshot(repo_id: str, local_dir: Path) -> Path:
    """Download (or resume) a full Hugging Face model snapshot.

    ``snapshot_download`` is idempotent: files already present locally are
    skipped, so calling this on every startup only costs a metadata check.

    Args:
        repo_id: Hugging Face repo id, e.g. "tencent/Hunyuan3D-1".
        local_dir: Target directory for real (non-symlinked) files, which
            the upstream Tencent scripts expect to read directly.

    Returns:
        *local_dir*, populated with the snapshot.
    """
    local_dir.mkdir(parents=True, exist_ok=True)
    snapshot_download(
        repo_id=repo_id,
        local_dir=str(local_dir),
        # NOTE(review): `local_dir_use_symlinks` and `resume_download` are
        # deprecated in recent huggingface_hub releases (resume is now the
        # default and local_dir materialises real files) — confirm against
        # the hub version pinned in requirements.txt before removing.
        local_dir_use_symlinks=False,
        resume_download=True,
    )
    return local_dir
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def prepare_hunyuan3d1_assets(progress: Callable[[str], None] | None = None) -> Path:
    """Ensure the Hunyuan3D-1 checkout and its model weights are cached.

    Args:
        progress: Optional callback receiving human-readable status lines.

    Returns:
        Path to the Hunyuan3D-1 repository checkout.
    """
    # Swap a no-op in when no callback was supplied, so the body stays flat.
    report = progress if progress else (lambda _message: None)
    repo_root = ensure_repo_checkout("Hunyuan3D-1", REPO_TEXT3D, REPO_TEXT3D_ZIP)
    weights_root = repo_root / "weights"
    report("Pulling Hunyuan3D-1 weights into the Space cache…")
    ensure_model_snapshot(MODEL_TEXT3D, weights_root)
    report("Pulling HunyuanDiT text-to-image weights into the Space cache…")
    ensure_model_snapshot(MODEL_TEXT2IMAGE, weights_root / "hunyuanDiT")
    return repo_root
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def prepare_omni_assets(progress: Callable[[str], None] | None = None) -> Path:
    """Ensure the Hunyuan3D-Omni checkout and weights are cached.

    Args:
        progress: Optional callback receiving human-readable status lines.

    Returns:
        Path to the Hunyuan3D-Omni repository checkout.
    """
    report = progress if progress else (lambda _message: None)
    repo_root = ensure_repo_checkout("Hunyuan3D-Omni", REPO_OMNI, REPO_OMNI_ZIP)
    report("Pulling Hunyuan3D-Omni weights into the Space cache…")
    ensure_model_snapshot(MODEL_OMNI, MODELS_DIR / "tencent--Hunyuan3D-Omni")
    return repo_root
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def _find_first_mesh(root: Path) -> Path:
|
| 132 |
+
candidates = []
|
| 133 |
+
for ext in ("*.glb", "*.obj", "*.ply", "*.stl", "*.off"):
|
| 134 |
+
candidates.extend(root.rglob(ext))
|
| 135 |
+
candidates = sorted(candidates, key=lambda p: (p.suffix != ".glb", len(str(p))))
|
| 136 |
+
if not candidates:
|
| 137 |
+
raise FileNotFoundError(f"No mesh artifact found under {root}")
|
| 138 |
+
return candidates[0]
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _normalize_to_glb(mesh_path: Path, out_path: Path) -> Path:
    """Load a mesh, recenter and unit-scale it, then export it as GLB.

    Scenes are flattened by concatenating their triangle meshes; anything
    unusable degrades to a placeholder box so this step never hard-fails.
    """
    loaded = trimesh.load(mesh_path, force="mesh")
    if isinstance(loaded, trimesh.Trimesh):
        mesh = loaded
    elif isinstance(loaded, trimesh.Scene):
        parts = [geom for geom in loaded.geometry.values() if isinstance(geom, trimesh.Trimesh)]
        mesh = trimesh.util.concatenate(parts) if parts else trimesh.creation.box()
    else:
        # Unrecognised asset type — fall back to a unit box placeholder.
        mesh = trimesh.creation.box()

    mesh.remove_unreferenced_vertices()
    # Center on the bounding-box centroid, then scale so the longest extent
    # becomes 1.0 (the `or 1.0` guards a degenerate zero-size mesh).
    mesh.apply_translation(-mesh.bounding_box.centroid)
    largest_extent = float(max(mesh.extents)) or 1.0
    mesh.apply_scale(1.0 / largest_extent)
    mesh.export(out_path)
    return out_path
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def _points_to_ply(points: np.ndarray, out_path: Path) -> Path:
    """Write an (N, 3) point array to *out_path* as a PLY point cloud."""
    trimesh.points.PointCloud(points).export(out_path)
    return out_path
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def _run_command(cmd: list[str], cwd: Path) -> subprocess.CompletedProcess[str]:
|
| 166 |
+
return subprocess.run(cmd, cwd=str(cwd), capture_output=True, text=True)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def run_hunyuan3d1_text_to_mesh(
    prompt: str,
    save_dir: Path,
    save_memory: bool = True,
    max_faces_num: int = 90000,
) -> Path:
    """Invoke the upstream Hunyuan3D-1 CLI for *prompt* and return its mesh.

    Args:
        prompt: Text prompt forwarded to the model's ``main.py``.
        save_dir: Directory where the CLI writes its outputs (created).
        save_memory: Pass ``--save_memory`` to the CLI when True.
        max_faces_num: Face budget forwarded as ``--max_faces_num``.

    Returns:
        Path to the first mesh artifact found under *save_dir*.

    Raises:
        RuntimeError: when the subprocess exits non-zero; the message
            includes the tail of its output for debugging.
    """
    repo_root = prepare_hunyuan3d1_assets()
    save_dir.mkdir(parents=True, exist_ok=True)

    command = [
        sys.executable,
        "main.py",
        "--text_prompt", prompt,
        "--save_folder", str(save_dir),
        "--max_faces_num", str(max_faces_num),
    ]
    if save_memory:
        command.append("--save_memory")

    completed = _run_command(command, cwd=repo_root)
    if completed.returncode != 0:
        # Surface only the tail of the output — full logs can be enormous.
        output_tail = (completed.stderr or completed.stdout or "").strip()[-1800:]
        raise RuntimeError(
            "Hunyuan3D-1 failed. This usually means the Space still needs the repo's heavier CUDA-side dependencies "
            f"or more GPU memory.\n\nLast output:\n{output_tail}"
        )
    return _find_first_mesh(save_dir)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def iter_hunyuan_blueprint_session(
    prompt: str,
    save_memory: bool = True,
    max_faces_num: int = 70000,
    preview_points: int = 3200,
) -> Generator[dict, None, dict]:
    """Drive one prompt → AI blueprint session, yielding progress dicts.

    Each yielded dict may contain any of: "status", "viewer_html",
    "summary", "blueprint_path", "state", "mesh_preview", "session_dir".
    The final yield (and the generator's return value) carries the full
    session state built from ``AiBlueprintSession.to_state()``.

    Args:
        prompt: User text prompt; stripped, must be non-empty.
        save_memory: Forwarded to the Hunyuan3D-1 CLI.
        max_faces_num: Face budget forwarded to the CLI.
        preview_points: Max points sampled from the AI mesh for the viewer.

    Raises:
        ValueError: if *prompt* is empty after stripping.
        RuntimeError: propagated from the Hunyuan3D-1 subprocess on failure.
    """
    prompt = (prompt or "").strip()
    if not prompt:
        raise ValueError("Enter a prompt first.")

    # Each run gets its own throwaway directory for every artifact.
    session_dir = Path(tempfile.mkdtemp(prefix="pb3d_hunyuan_session_"))
    yield {"status": "Preparing Hugging Face cache and model repos…", "session_dir": str(session_dir)}

    ensure_cache_home()
    yield {"status": f"Queueing {MODEL_TEXT3D} for prompt-driven generation…", "session_dir": str(session_dir)}
    raw_mesh = run_hunyuan3d1_text_to_mesh(
        prompt=prompt,
        save_dir=session_dir / "hunyuan3d1_output",
        save_memory=save_memory,
        max_faces_num=max_faces_num,
    )

    yield {"status": "Sampling the AI mesh into an inspectable particle blueprint…", "session_dir": str(session_dir)}
    points = load_points_from_mesh_file(raw_mesh, max_points=preview_points)
    blueprint_path = _points_to_ply(points, session_dir / "blueprint_from_ai_mesh.ply")
    preview_glb = _normalize_to_glb(raw_mesh, session_dir / "preview_mesh.glb")

    # Reveal the blueprint in staged fractions so the UI shows progress
    # instead of a single long pause (minimum 180 points per stage).
    chunks = [0.22, 0.45, 0.7, 1.0]
    for idx, frac in enumerate(chunks, start=1):
        count = max(180, int(len(points) * frac))
        preview = points[:count]
        yield {
            "status": f"Blueprint readying for inspection ({idx}/{len(chunks)})…",
            "viewer_html": point_cloud_viewer_html(preview, status=f"AI blueprint • {count} points"),
            "summary": {
                "prompt": prompt,
                "source_model": MODEL_TEXT3D,
                "point_count": int(count),
                "stage": idx,
                "stage_count": len(chunks),
                "raw_ai_mesh_path": str(raw_mesh),
            },
            "session_dir": str(session_dir),
        }

    # Plain-dict state so it survives Gradio session round-trips.
    state = AiBlueprintSession(
        session_dir=str(session_dir),
        blueprint_path=str(blueprint_path),
        raw_ai_mesh_path=str(raw_mesh),
        preview_glb_path=str(preview_glb),
        source_model=MODEL_TEXT3D,
        point_count=int(len(points)),
        prompt=prompt,
    ).to_state()

    yield {
        "status": "Blueprint ready. Rotate it on iPhone, then make the mesh when happy.",
        "viewer_html": point_cloud_viewer_html(points, status=f"AI blueprint • {len(points)} points"),
        "summary": {**state, "mode": "ai_blueprint_from_mesh"},
        "blueprint_path": str(blueprint_path),
        "state": state,
        "mesh_preview": str(preview_glb),
        "session_dir": str(session_dir),
    }
    return state
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def finalize_ai_mesh_session(state: dict, prepare_omni: bool = True) -> Generator[dict, None, dict]:
    """Turn a blueprint session's raw AI mesh into a downloadable GLB.

    Optionally preloads the Hunyuan3D-Omni assets first (best-effort: a
    cache failure is recorded in the summary, not raised).  Yields progress
    dicts with "status" plus, at the end, "mesh_path"/"mesh_file"/"summary";
    the summary is also the generator's return value.

    Args:
        state: Session dict from ``iter_hunyuan_blueprint_session`` — must
            contain "raw_ai_mesh_path" and "session_dir".
        prepare_omni: Whether to attempt caching the Omni assets.

    Raises:
        KeyError: if *state* lacks the required keys.
    """
    mesh_path = Path(state["raw_ai_mesh_path"])
    session_dir = Path(state["session_dir"])

    if prepare_omni:
        try:
            yield {"status": f"Preloading {MODEL_OMNI} for future controllable refinement…"}
            prepare_omni_assets()
            omni_note = f"{MODEL_OMNI} cached."
        except Exception as exc:
            # Best-effort: Omni is only a cache-prep step in this build.
            omni_note = f"Could not cache {MODEL_OMNI}: {exc}"
    else:
        omni_note = "Skipped."

    yield {"status": "Centering and converting the AI mesh to exportable GLB…"}
    glb_path = _normalize_to_glb(mesh_path, session_dir / "final_mesh.glb")
    # Re-load only to count vertices/faces for the summary; flatten a Scene
    # into a single Trimesh if that is what the loader returned.
    mesh = trimesh.load(glb_path, force="mesh")
    if isinstance(mesh, trimesh.Scene):
        mesh = trimesh.util.concatenate([g for g in mesh.geometry.values() if isinstance(g, trimesh.Trimesh)])

    summary = {
        **state,
        "mesh_path": str(glb_path),
        "mesh_source": MODEL_TEXT3D,
        "omni_cache_note": omni_note,
        "vertex_count": int(len(mesh.vertices)) if isinstance(mesh, trimesh.Trimesh) else None,
        "face_count": int(len(mesh.faces)) if isinstance(mesh, trimesh.Trimesh) else None,
        "note": "This export is the AI mesh produced during the blueprint stage, normalized for download. Hunyuan3D-Omni is preloaded but not yet driving the second-stage refinement command in this build.",
    }
    yield {
        "status": "Mesh ready.",
        "mesh_path": str(glb_path),
        "summary": summary,
        "mesh_file": str(glb_path),
    }
    return summary
|
app.py
CHANGED
|
@@ -2,48 +2,42 @@ from __future__ import annotations
|
|
| 2 |
|
| 3 |
import gradio as gr
|
| 4 |
|
| 5 |
-
from
|
| 6 |
-
from
|
| 7 |
-
from
|
| 8 |
-
|
| 9 |
|
| 10 |
TITLE = "Particle Blueprint 3D"
|
| 11 |
-
TAGLINE = "
|
| 12 |
-
|
| 13 |
-
"
|
| 14 |
-
"
|
| 15 |
)
|
| 16 |
|
| 17 |
-
|
| 18 |
-
"Cargo hauler": "small cargo hauler with a boxy hull,
|
| 19 |
-
"Compact fighter": "compact fighter with
|
| 20 |
-
"Industrial
|
| 21 |
}
|
| 22 |
|
| 23 |
CSS = """
|
| 24 |
footer {display:none !important}
|
| 25 |
-
.gradio-container {max-width:
|
| 26 |
-
|
| 27 |
-
.hero-
|
| 28 |
-
.hero-
|
| 29 |
-
.
|
| 30 |
-
.flow
|
| 31 |
-
.flow
|
| 32 |
-
.
|
| 33 |
-
.
|
| 34 |
-
.cta-row button {min-height: 52px !important; border-radius: 16px !important; font-size: 1rem !important}
|
| 35 |
-
.preset-row {display:grid; grid-template-columns: repeat(3, minmax(0,1fr)); gap: 8px}
|
| 36 |
-
.preset-row button {min-height: 46px !important; border-radius: 16px !important}
|
| 37 |
-
.model3d-wrap {border-radius: 20px; overflow: hidden}
|
| 38 |
-
.mobile-note {font-size: 0.92rem; opacity: 0.82}
|
| 39 |
-
.small-note {font-size:0.88rem; opacity:0.8}
|
| 40 |
#status-box p {margin:0}
|
|
|
|
| 41 |
@media (max-width: 820px) {
|
| 42 |
-
.gradio-container {padding:
|
| 43 |
-
.hero
|
| 44 |
-
.
|
| 45 |
-
.
|
| 46 |
-
.cta-row {position: sticky; bottom: 10px; z-index: 10; background: rgba(15,18,24,0.90); backdrop-filter: blur(12px); padding: 8px; border: 1px solid rgba(255,255,255,0.08); border-radius: 18px}
|
| 47 |
}
|
| 48 |
"""
|
| 49 |
|
|
@@ -52,185 +46,224 @@ def _status_md(text: str) -> str:
|
|
| 52 |
return f"**Status**\n\n{text}"
|
| 53 |
|
| 54 |
|
| 55 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
prompt = (prompt or "").strip()
|
| 57 |
if not prompt:
|
| 58 |
raise gr.Error("Enter a prompt first.")
|
| 59 |
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
| 64 |
|
| 65 |
yield (
|
| 66 |
_status_md("Starting blueprint generation…"),
|
| 67 |
-
|
| 68 |
None,
|
| 69 |
None,
|
| 70 |
None,
|
| 71 |
gr.update(interactive=False),
|
|
|
|
| 72 |
)
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
yield (
|
| 85 |
-
_status_md(
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
gr.update(interactive=bool(
|
|
|
|
| 91 |
)
|
| 92 |
|
| 93 |
|
| 94 |
-
def stream_mesh(state: dict,
|
| 95 |
if not state:
|
| 96 |
raise gr.Error("Generate a blueprint first.")
|
| 97 |
|
| 98 |
-
latest_summary = None
|
| 99 |
-
latest_mesh_path = None
|
| 100 |
yield _status_md("Starting mesh generation…"), None, None, None
|
| 101 |
|
| 102 |
-
|
| 103 |
-
state
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
|
| 112 |
|
| 113 |
with gr.Blocks(theme=gr.themes.Soft(), css=CSS, title=TITLE, fill_width=True) as demo:
|
| 114 |
session_state = gr.State(value=None)
|
| 115 |
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
)
|
| 132 |
|
| 133 |
-
with gr.
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
)
|
| 141 |
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
)
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
blueprint_view = gr.Model3D(
|
| 179 |
-
label="Particle blueprint (.ply)",
|
| 180 |
-
display_mode="point_cloud",
|
| 181 |
-
clear_color=(0.02, 0.02, 0.03, 1.0),
|
| 182 |
-
camera_position=(35, 65, 6),
|
| 183 |
-
zoom_speed=1.15,
|
| 184 |
-
pan_speed=0.95,
|
| 185 |
-
elem_classes=["model3d-wrap"],
|
| 186 |
-
height=540,
|
| 187 |
-
)
|
| 188 |
-
with gr.TabItem("Mesh"):
|
| 189 |
-
mesh_view = gr.Model3D(
|
| 190 |
-
label="Mesh preview (.glb)",
|
| 191 |
-
display_mode="solid",
|
| 192 |
-
clear_color=(0.02, 0.02, 0.03, 1.0),
|
| 193 |
-
camera_position=(35, 65, 6),
|
| 194 |
-
zoom_speed=1.15,
|
| 195 |
-
pan_speed=0.95,
|
| 196 |
-
elem_classes=["model3d-wrap"],
|
| 197 |
-
height=540,
|
| 198 |
-
)
|
| 199 |
-
with gr.TabItem("Summary and files"):
|
| 200 |
-
summary = gr.JSON(label="Session summary")
|
| 201 |
-
blueprint_file = gr.File(label="Download blueprint (.ply)")
|
| 202 |
-
mesh_file = gr.File(label="Download mesh (.glb)")
|
| 203 |
-
|
| 204 |
-
preset_a.click(lambda: PROMPT_PRESETS["Cargo hauler"], outputs=prompt)
|
| 205 |
-
preset_b.click(lambda: PROMPT_PRESETS["Compact fighter"], outputs=prompt)
|
| 206 |
-
preset_c.click(lambda: PROMPT_PRESETS["Industrial dropship"], outputs=prompt)
|
| 207 |
-
|
| 208 |
-
blueprint_btn.click(
|
| 209 |
-
fn=stream_blueprint,
|
| 210 |
-
inputs=[prompt, parser_mode, model_choice, detail],
|
| 211 |
-
outputs=[status, blueprint_view, summary, blueprint_file, session_state, mesh_btn],
|
| 212 |
-
)
|
| 213 |
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
|
| 235 |
|
| 236 |
if __name__ == "__main__":
|
|
|
|
| 2 |
|
| 3 |
import gradio as gr
|
| 4 |
|
| 5 |
+
from ai_runtime import MODEL_OMNI, MODEL_TEXT3D, iter_hunyuan_blueprint_session, finalize_ai_mesh_session
|
| 6 |
+
from fallback_generator import iter_blueprint_session as iter_fallback_blueprint
|
| 7 |
+
from fallback_generator import iter_meshify_session as iter_fallback_mesh
|
| 8 |
+
from viewer import empty_viewer_html, load_points_from_cloud_file, point_cloud_viewer_html
|
| 9 |
|
| 10 |
TITLE = "Particle Blueprint 3D"
|
| 11 |
+
TAGLINE = "Prompt → AI blueprint → inspect on iPhone → export mesh"
|
| 12 |
+
SUBTITLE = (
|
| 13 |
+
"Rebuilt around Hugging Face-hosted Tencent Hunyuan 3D models. "
|
| 14 |
+
"The first stage uses Hunyuan3D-1 for prompt-driven 3D generation, then converts that AI output into a particle blueprint for review."
|
| 15 |
)
|
| 16 |
|
| 17 |
+
PROMPTS = {
|
| 18 |
+
"Cargo hauler": "small cargo hauler with a boxy hull, rear ramp, cargo bay, 4 engines and landing gear",
|
| 19 |
+
"Compact fighter": "compact fighter with a sleek hull, twin engines, short wings and a small cockpit",
|
| 20 |
+
"Industrial shuttle": "industrial shuttle with a rounded hull, cargo hold, fin tail and landing gear",
|
| 21 |
}
|
| 22 |
|
| 23 |
CSS = """
|
| 24 |
footer {display:none !important}
|
| 25 |
+
.gradio-container {max-width: 1080px !important; margin: 0 auto; padding: 0 12px 28px !important}
|
| 26 |
+
.hero, .panel, .status-card {border:1px solid rgba(255,255,255,.08); border-radius:22px; background:rgba(255,255,255,.03); padding:14px 16px}
|
| 27 |
+
.hero-title {font-size:1.5rem; font-weight:800; margin:0 0 6px 0}
|
| 28 |
+
.hero-sub {opacity:.88; margin:0}
|
| 29 |
+
.flow {display:grid; grid-template-columns:repeat(4, minmax(0,1fr)); gap:10px; margin-top:10px}
|
| 30 |
+
.flow div {border:1px solid rgba(255,255,255,.08); border-radius:16px; padding:12px; background:rgba(255,255,255,.02)}
|
| 31 |
+
.flow strong {display:block; margin-bottom:4px}
|
| 32 |
+
.cta-row button {min-height:52px !important; border-radius:16px !important; font-size:1rem !important}
|
| 33 |
+
.preset-row button {min-height:44px !important; border-radius:14px !important}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
#status-box p {margin:0}
|
| 35 |
+
.small-note {font-size:.88rem; opacity:.8}
|
| 36 |
@media (max-width: 820px) {
|
| 37 |
+
.gradio-container {padding:0 10px 22px !important}
|
| 38 |
+
.hero,.panel,.status-card {padding:12px}
|
| 39 |
+
.flow {grid-template-columns:1fr}
|
| 40 |
+
.cta-row {position:sticky; bottom:10px; z-index:12; background:rgba(10,13,22,.92); backdrop-filter:blur(12px); padding:8px; border:1px solid rgba(255,255,255,.08); border-radius:18px}
|
|
|
|
| 41 |
}
|
| 42 |
"""
|
| 43 |
|
|
|
|
| 46 |
return f"**Status**\n\n{text}"
|
| 47 |
|
| 48 |
|
| 49 |
+
def _blueprint_html_from_ply(path: str | None) -> str:
    """Render a .ply blueprint file as viewer HTML, degrading gracefully.

    Returns the empty-viewer placeholder when *path* is falsy, and an
    explanatory placeholder when the file cannot be read or rendered.
    """
    if not path:
        return empty_viewer_html()
    try:
        cloud = load_points_from_cloud_file(path)
        return point_cloud_viewer_html(cloud, status=f"Blueprint • {len(cloud)} points")
    except Exception as exc:
        return empty_viewer_html(f"Blueprint file exists but the viewer could not read it: {exc}")
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def stream_blueprint(prompt: str, mode: str, save_memory: bool, max_faces: int, detail: int):
    """Gradio streaming handler for the blueprint step.

    Yields 7-tuples matching the wired outputs:
    (status markdown, viewer HTML, summary JSON, blueprint file path,
    session state, mesh-button update, mesh preview path).

    When *mode* is "hunyuan" the AI pipeline is tried first; any exception
    there falls back to the local scaffold builder so the UI keeps working.

    Raises:
        gr.Error: if *prompt* is empty after stripping.
    """
    prompt = (prompt or "").strip()
    if not prompt:
        raise gr.Error("Enter a prompt first.")

    # Running "latest known value" for each output slot; later yields only
    # overwrite slots the underlying generator actually updated.
    html = empty_viewer_html("Starting…")
    summary = None
    state = None
    blueprint_file = None
    mesh_preview = None

    yield (
        _status_md("Starting blueprint generation…"),
        html,
        None,
        None,
        None,
        gr.update(interactive=False),  # mesh button disabled until state exists
        None,
    )

    if mode == "hunyuan":
        try:
            for update in iter_hunyuan_blueprint_session(
                prompt=prompt,
                save_memory=save_memory,
                max_faces_num=int(max_faces),
                # Scale preview density with the detail slider, floor 2200.
                preview_points=max(2200, int(detail) * 140),
            ):
                html = update.get("viewer_html", html)
                summary = update.get("summary", summary)
                blueprint_file = update.get("blueprint_path", blueprint_file)
                mesh_preview = update.get("mesh_preview", mesh_preview)
                state = update.get("state", state)
                yield (
                    _status_md(update.get("status", "Working…")),
                    html,
                    summary,
                    blueprint_file,
                    state,
                    gr.update(interactive=bool(state)),
                    mesh_preview,
                )
            return
        except Exception as exc:
            # Surface the failure reason, then fall through to the local
            # scaffold builder below.
            fallback_notice = f"AI model path failed, so the app is falling back to the local scaffold builder. Reason: {exc}"
            yield (
                _status_md(fallback_notice),
                html,
                {"fallback_reason": str(exc), "requested_mode": mode, "prompt": prompt},
                blueprint_file,
                state,
                gr.update(interactive=False),
                mesh_preview,
            )

    # Local fallback path (also used when mode != "hunyuan").
    for update in iter_fallback_blueprint(prompt=prompt, detail=detail, parser_mode="heuristic"):
        blueprint_path = update.get("blueprint_path")
        if blueprint_path:
            html = _blueprint_html_from_ply(blueprint_path)
            blueprint_file = blueprint_path
        summary = update.get("summary", summary)
        state = update.get("state", state)
        yield (
            _status_md(update.get("status", "Working…")),
            html,
            summary,
            blueprint_file,
            state,
            gr.update(interactive=bool(state)),
            mesh_preview,
        )
|
| 131 |
|
| 132 |
|
| 133 |
def stream_mesh(state: dict, prepare_omni: bool, voxel_pitch: float):
    """Stream mesh-generation progress updates for the Gradio UI.

    Yields 4-tuples matching the event outputs:
    (status markdown, mesh path for the 3D viewer, downloadable mesh file,
    summary dict).

    Raises:
        gr.Error: when no blueprint session exists yet.
    """
    if not state:
        raise gr.Error("Generate a blueprint first.")

    # Clear the previous mesh/summary while work starts.
    yield _status_md("Starting mesh generation…"), None, None, None

    # Prefer the AI path when the blueprint step produced a raw AI mesh.
    if state.get("raw_ai_mesh_path"):
        for update in finalize_ai_mesh_session(state, prepare_omni=prepare_omni):
            yield (
                _status_md(update.get("status", "Meshing…")),
                update.get("mesh_path"),
                update.get("mesh_file"),
                update.get("summary"),
            )
        return

    # Otherwise fall back to the local voxel mesher.
    for update in iter_fallback_mesh(state=state, voxel_pitch=voxel_pitch, use_target_model_cache=prepare_omni):
        yield (
            _status_md(update.get("status", "Meshing…")),
            update.get("mesh_path"),
            update.get("mesh_file"),
            update.get("summary"),
        )
| 157 |
|
| 158 |
with gr.Blocks(theme=gr.themes.Soft(), css=CSS, title=TITLE, fill_width=True) as demo:
    # Blueprint session dict produced by step one; consumed by "Make mesh".
    session_state = gr.State(value=None)

    # Hero header: title, tagline, and the four-step usage flow.
    gr.HTML(
        f"""
        <div class='hero'>
            <div class='hero-title'>{TITLE}</div>
            <p class='hero-sub'><strong>{TAGLINE}</strong><br>{SUBTITLE}</p>
            <div class='flow'>
                <div><strong>1. Describe</strong>Write what you want.</div>
                <div><strong>2. Generate blueprint</strong>{MODEL_TEXT3D} runs, then its AI mesh is turned into a particle blueprint.</div>
                <div><strong>3. Inspect</strong>Rotate, zoom and pan the blueprint on iPhone before committing.</div>
                <div><strong>4. Make mesh</strong>Export the AI mesh as GLB, with {MODEL_OMNI} preloaded for later refinement work.</div>
            </div>
        </div>
        """
    )

    with gr.Column(elem_classes=["panel"]):
        # Free-form prompt plus three one-click preset buttons.
        prompt = gr.Textbox(
            label="Describe the model",
            lines=4,
            max_lines=7,
            placeholder="Example: small cargo hauler with a boxy hull, cargo bay, rear ramp, 4 engines and landing gear",
        )

        with gr.Row(elem_classes=["preset-row"]):
            p1 = gr.Button("Cargo hauler")
            p2 = gr.Button("Compact fighter")
            p3 = gr.Button("Industrial shuttle")

        with gr.Accordion("Generation settings", open=False):
            mode = gr.Radio(
                choices=[("Hunyuan3D-1 AI", "hunyuan"), ("Local fallback scaffold", "fallback")],
                value="hunyuan",
                label="Blueprint generation mode",
                info="Use Hunyuan first. If its repo dependencies are not fully ready in the Space yet, the app will fall back automatically.",
            )
            save_memory = gr.Checkbox(
                value=True,
                label="Use save-memory mode for Hunyuan3D-1",
                info="Useful on tighter ZeroGPU runs.",
            )
            max_faces = gr.Slider(20000, 90000, value=70000, step=5000, label="Max faces for the AI mesh")
            detail = gr.Slider(14, 34, value=22, step=2, label="Blueprint preview density")
            voxel_pitch = gr.Slider(0.055, 0.12, value=0.085, step=0.005, label="Fallback mesh density")
            prepare_omni = gr.Checkbox(
                value=True,
                label=f"Preload {MODEL_OMNI} during mesh step",
                info="This caches the controllable refinement model in the Space even though this build still exports the Hunyuan3D-1 mesh on step two.",
            )

    with gr.Row(elem_classes=["cta-row"]):
        blueprint_btn = gr.Button("Generate blueprint", variant="primary")
        # Disabled until a blueprint session exists (see stream_blueprint outputs).
        mesh_btn = gr.Button("Make mesh", interactive=False)
        clear_btn = gr.Button("Clear")

    with gr.Column(elem_classes=["status-card"]):
        status = gr.Markdown(_status_md("Ready."), elem_id="status-box")
        gr.Markdown("<span class='small-note'>On iPhone: one finger orbits. Two fingers pan and zoom.</span>")

    with gr.Tabs():
        with gr.TabItem("Blueprint"):
            blueprint_view = gr.HTML(value=empty_viewer_html(), label="Blueprint viewer")
        with gr.TabItem("Mesh"):
            mesh_view = gr.Model3D(
                label="Mesh preview (.glb)",
                display_mode="solid",
                clear_color=(0.02, 0.02, 0.03, 1.0),
                camera_position=(35, 65, 6),
                zoom_speed=1.15,
                pan_speed=0.95,
                height=560,
            )
        with gr.TabItem("Summary and files"):
            summary = gr.JSON(label="Session summary")
            blueprint_file = gr.File(label="Particle blueprint (.ply)")
            mesh_file = gr.File(label="Mesh export (.glb)")

    # Preset buttons simply fill the prompt box with a canned description.
    p1.click(lambda: PROMPTS["Cargo hauler"], outputs=prompt)
    p2.click(lambda: PROMPTS["Compact fighter"], outputs=prompt)
    p3.click(lambda: PROMPTS["Industrial shuttle"], outputs=prompt)

    # Step one: streaming blueprint generation; also toggles mesh_btn.
    blueprint_btn.click(
        fn=stream_blueprint,
        inputs=[prompt, mode, save_memory, max_faces, detail],
        outputs=[status, blueprint_view, summary, blueprint_file, session_state, mesh_btn, mesh_view],
    )

    # Step two: streaming mesh export from the stored session state.
    mesh_btn.click(
        fn=stream_mesh,
        inputs=[session_state, prepare_omni, voxel_pitch],
        outputs=[status, mesh_view, mesh_file, summary],
    )

    # Reset every output and disable the mesh button again.
    clear_btn.click(
        lambda: (
            "",
            _status_md("Ready."),
            empty_viewer_html(),
            None,
            None,
            None,
            None,
            gr.update(interactive=False),
            None,
        ),
        outputs=[prompt, status, blueprint_view, mesh_view, summary, blueprint_file, mesh_file, mesh_btn, session_state],
    )
|
| 268 |
|
| 269 |
if __name__ == "__main__":
|
fallback_generator.py
ADDED
|
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
import tempfile
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Generator
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import trimesh
|
| 11 |
+
from scipy import ndimage
|
| 12 |
+
from skimage import measure
|
| 13 |
+
|
| 14 |
+
from llm_parser import DEFAULT_LOCAL_MODEL, parse_prompt_with_local_llm
|
| 15 |
+
from model_runtime import TARGET_OMNI_MODEL, ensure_target_model_cached
|
| 16 |
+
from parser import PromptSpec, parse_prompt
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclass
class BuildArtifacts:
    """File paths and metadata produced by one full blueprint + mesh run."""

    # Particle blueprint point cloud (.ply).
    ply_path: str
    # Final exported mesh (.glb).
    glb_path: str
    # Merged session/mesh summary dict.
    summary: dict
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Multiplier applied to all hull dimensions for each requested craft scale.
SCALE_FACTORS = {
    "small": 1.0,
    "medium": 1.35,
    "large": 1.85,
}
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _sample_box_surface(center, size, density: int, label: int) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
|
| 34 |
+
cx, cy, cz = center
|
| 35 |
+
sx, sy, sz = size
|
| 36 |
+
n = max(4, density)
|
| 37 |
+
u = np.linspace(-0.5, 0.5, n)
|
| 38 |
+
vv = np.linspace(-0.5, 0.5, n)
|
| 39 |
+
pts = []
|
| 40 |
+
normals = []
|
| 41 |
+
labels = []
|
| 42 |
+
for ax in (-1, 1):
|
| 43 |
+
x = np.full((n, n), cx + ax * sx / 2)
|
| 44 |
+
y, z = np.meshgrid(u * sy + cy, vv * sz + cz)
|
| 45 |
+
pts.append(np.column_stack([x.ravel(), y.ravel(), z.ravel()]))
|
| 46 |
+
normals.append(np.tile([ax, 0, 0], (n * n, 1)))
|
| 47 |
+
labels.append(np.full(n * n, label))
|
| 48 |
+
for ay in (-1, 1):
|
| 49 |
+
y = np.full((n, n), cy + ay * sy / 2)
|
| 50 |
+
x, z = np.meshgrid(u * sx + cx, vv * sz + cz)
|
| 51 |
+
pts.append(np.column_stack([x.ravel(), y.ravel(), z.ravel()]))
|
| 52 |
+
normals.append(np.tile([0, ay, 0], (n * n, 1)))
|
| 53 |
+
labels.append(np.full(n * n, label))
|
| 54 |
+
for az in (-1, 1):
|
| 55 |
+
z = np.full((n, n), cz + az * sz / 2)
|
| 56 |
+
x, y = np.meshgrid(u * sx + cx, vv * sy + cy)
|
| 57 |
+
pts.append(np.column_stack([x.ravel(), y.ravel(), z.ravel()]))
|
| 58 |
+
normals.append(np.tile([0, 0, az], (n * n, 1)))
|
| 59 |
+
labels.append(np.full(n * n, label))
|
| 60 |
+
return np.vstack(pts), np.vstack(normals), np.concatenate(labels)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _sample_ellipsoid_surface(center, radii, density: int, label: int) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
|
| 64 |
+
cx, cy, cz = center
|
| 65 |
+
rx, ry, rz = radii
|
| 66 |
+
nu = max(16, density * 3)
|
| 67 |
+
nv = max(10, density * 2)
|
| 68 |
+
u = np.linspace(0, 2 * math.pi, nu, endpoint=False)
|
| 69 |
+
v = np.linspace(-math.pi / 2, math.pi / 2, nv)
|
| 70 |
+
uu, vv = np.meshgrid(u, v)
|
| 71 |
+
x = cx + rx * np.cos(vv) * np.cos(uu)
|
| 72 |
+
y = cy + ry * np.cos(vv) * np.sin(uu)
|
| 73 |
+
z = cz + rz * np.sin(vv)
|
| 74 |
+
pts = np.column_stack([x.ravel(), y.ravel(), z.ravel()])
|
| 75 |
+
normals = np.column_stack([
|
| 76 |
+
(x - cx).ravel() / max(rx, 1e-6),
|
| 77 |
+
(y - cy).ravel() / max(ry, 1e-6),
|
| 78 |
+
(z - cz).ravel() / max(rz, 1e-6),
|
| 79 |
+
])
|
| 80 |
+
normals /= np.linalg.norm(normals, axis=1, keepdims=True) + 1e-8
|
| 81 |
+
labels = np.full(len(pts), label)
|
| 82 |
+
return pts, normals, labels
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _sample_cylinder_surface(center, radius, length, axis: str, density: int, label: int) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
|
| 86 |
+
cx, cy, cz = center
|
| 87 |
+
nt = max(18, density * 4)
|
| 88 |
+
nl = max(6, density)
|
| 89 |
+
theta = np.linspace(0, 2 * math.pi, nt, endpoint=False)
|
| 90 |
+
line = np.linspace(-length / 2, length / 2, nl)
|
| 91 |
+
tt, ll = np.meshgrid(theta, line)
|
| 92 |
+
if axis == "x":
|
| 93 |
+
x = cx + ll
|
| 94 |
+
y = cy + radius * np.cos(tt)
|
| 95 |
+
z = cz + radius * np.sin(tt)
|
| 96 |
+
normals = np.column_stack([np.zeros(x.size), np.cos(tt).ravel(), np.sin(tt).ravel()])
|
| 97 |
+
elif axis == "y":
|
| 98 |
+
x = cx + radius * np.cos(tt)
|
| 99 |
+
y = cy + ll
|
| 100 |
+
z = cz + radius * np.sin(tt)
|
| 101 |
+
normals = np.column_stack([np.cos(tt).ravel(), np.zeros(x.size), np.sin(tt).ravel()])
|
| 102 |
+
else:
|
| 103 |
+
x = cx + radius * np.cos(tt)
|
| 104 |
+
y = cy + radius * np.sin(tt)
|
| 105 |
+
z = cz + ll
|
| 106 |
+
normals = np.column_stack([np.cos(tt).ravel(), np.sin(tt).ravel(), np.zeros(x.size)])
|
| 107 |
+
pts = np.column_stack([x.ravel(), y.ravel(), z.ravel()])
|
| 108 |
+
labels = np.full(len(pts), label)
|
| 109 |
+
return pts, normals, labels
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def export_point_cloud_as_ply(points: np.ndarray, labels: np.ndarray, path: str) -> str:
    """Write *points* to *path* as a colored PLY point cloud.

    Each part label indexes (mod palette size) into a fixed 7-color palette
    so distinct parts are visually distinguishable in the viewer.
    """
    palette = np.array(
        [
            [170, 170, 180],
            [120, 180, 255],
            [255, 190, 120],
            [180, 180, 255],
            [255, 120, 120],
            [200, 255, 180],
            [255, 255, 180],
        ],
        dtype=np.uint8,
    )
    cloud = trimesh.points.PointCloud(vertices=points, colors=palette[labels % len(palette)])
    cloud.export(path)
    return path
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def export_mesh_as_glb(mesh: trimesh.Trimesh, path: str) -> str:
    """Paint *mesh* a uniform light grey and export it to *path* (GLB)."""
    grey = np.array([[185, 190, 200, 255]], dtype=np.uint8)
    mesh.visual.vertex_colors = np.tile(grey, (len(mesh.vertices), 1))
    mesh.export(path)
    return path
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def _resolve_spec(prompt: str, parser_mode: str, model_id: str | None = None) -> tuple[PromptSpec, str]:
    """Parse *prompt* into a PromptSpec via the chosen backend.

    Returns (spec, backend_tag) where the tag is either "heuristic" or
    "local_llm:<model_id>". Any mode string starting with "local" selects
    the local LLM parser; everything else falls back to the heuristic.
    """
    normalized = (parser_mode or "heuristic").strip().lower()
    if normalized.startswith("local"):
        chosen_model = model_id or DEFAULT_LOCAL_MODEL
        spec = parse_prompt_with_local_llm(prompt, model_id=chosen_model)
        return spec, f"local_llm:{chosen_model}"
    return parse_prompt(prompt), "heuristic"
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def _iter_part_specs(spec: PromptSpec, detail: int):
    """Yield (part_name, points, normals, labels) for each part of the craft.

    Parts are emitted in build order: hull, cockpit, optional cargo bay,
    optional wings, engines, optional fin, optional landing gear. *detail*
    controls sampling density (floored at 6).
    """
    scale = SCALE_FACTORS[spec.scale]
    density = max(6, detail)

    hull_len = 2.8 * scale
    hull_w = 1.2 * scale
    hull_h = 0.8 * scale

    if spec.hull_style == "rounded":
        yield "Hull", *_sample_ellipsoid_surface((0.0, 0.0, 0.0), (hull_len / 2, hull_w / 2, hull_h / 2), density, 0)
    elif spec.hull_style == "sleek":
        # Sleek hulls blend an elongated ellipsoid nose with a boxy rear body.
        p1, n1, l1 = _sample_ellipsoid_surface((0.12 * scale, 0.0, 0.0), (hull_len / 2.3, hull_w / 2.8, hull_h / 2.6), density, 0)
        p2, n2, l2 = _sample_box_surface((-0.15 * scale, 0.0, -0.02 * scale), (hull_len * 0.52, hull_w * 0.5, hull_h * 0.55), max(4, density // 2), 0)
        yield "Hull", np.vstack([p1, p2]), np.vstack([n1, n2]), np.concatenate([l1, l2])
    else:
        yield "Hull", *_sample_box_surface((0.0, 0.0, 0.0), (hull_len, hull_w, hull_h), density, 0)

    # Cockpit sits near the nose, slightly above the hull midline.
    cockpit_center = (hull_len / 2 - hull_len * spec.cockpit_ratio * 0.8, 0.0, hull_h * 0.14)
    yield "Cockpit", *_sample_ellipsoid_surface(cockpit_center, (hull_len * spec.cockpit_ratio, hull_w * 0.22, hull_h * 0.24), max(4, density // 2), 1)

    if spec.cargo_ratio > 0.16:
        cargo_center = (-hull_len * 0.18, 0.0, -hull_h * 0.06)
        cargo_size = (hull_len * spec.cargo_ratio, hull_w * 0.76, hull_h * 0.6)
        yield "Cargo bay", *_sample_box_surface(cargo_center, cargo_size, max(4, density // 2), 2)

    if spec.wing_span > 0:
        wing_length = hull_len * 0.34
        wing_width = hull_w * 0.18
        wing_height = hull_h * 0.08
        yoff = hull_w * 0.45 + wing_width * 0.6
        wing_parts = []
        wing_normals = []
        wing_labels = []
        for side in (-1, 1):
            wc = (-0.1 * scale, side * yoff, -0.04 * scale)
            pp, pn, pl = _sample_box_surface(wc, (wing_length, wing_width, wing_height), max(6, density // 3), 3)
            wing_parts.append(pp)
            wing_normals.append(pn)
            wing_labels.append(pl)
        yield "Wings", np.vstack(wing_parts), np.vstack(wing_normals), np.concatenate(wing_labels)

    engine_radius = 0.14 * scale if spec.object_type != "fighter" else 0.1 * scale
    engine_length = 0.48 * scale
    # Bug fix: np.linspace(-a, a, 1) returns [-a], which mounted a single
    # engine off-center; a lone engine now sits on the centerline.
    if spec.engine_count == 1:
        engine_y_positions = np.array([0.0])
    else:
        engine_y_positions = np.linspace(-hull_w * 0.32, hull_w * 0.32, spec.engine_count)
    engine_parts = []
    engine_normals = []
    engine_labels = []
    for ypos in engine_y_positions:
        ec = (-hull_len / 2 + engine_length * 0.3, ypos, 0.0)
        pp, pn, pl = _sample_cylinder_surface(ec, engine_radius, engine_length, "x", max(6, density // 3), 4)
        engine_parts.append(pp)
        engine_normals.append(pn)
        engine_labels.append(pl)
    yield "Engines", np.vstack(engine_parts), np.vstack(engine_normals), np.concatenate(engine_labels)

    if spec.fin_height > 0:
        fin_center = (-hull_len * 0.25, 0.0, hull_h * 0.42)
        fin_size = (hull_len * 0.18, hull_w * 0.1, hull_h * max(spec.fin_height, 0.12))
        yield "Fin", *_sample_box_surface(fin_center, fin_size, max(6, density // 3), 5)

    if spec.landing_gear:
        # Four struts in a 2x2 pattern under the hull.
        gear_x = np.array([-hull_len * 0.18, hull_len * 0.12])
        gear_y = np.array([-hull_w * 0.28, hull_w * 0.28])
        gear_parts = []
        gear_normals = []
        gear_labels = []
        for gx in gear_x:
            for gy in gear_y:
                gc = (gx, gy, -hull_h * 0.45)
                pp, pn, pl = _sample_cylinder_surface(gc, 0.04 * scale, 0.22 * scale, "z", max(5, density // 5), 6)
                gear_parts.append(pp)
                gear_normals.append(pn)
                gear_labels.append(pl)
        yield "Landing gear", np.vstack(gear_parts), np.vstack(gear_normals), np.concatenate(gear_labels)
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def iter_blueprint_session(
    prompt: str,
    detail: int = 24,
    parser_mode: str = "heuristic",
    model_id: str | None = None,
) -> Generator[dict, None, dict]:
    """Build a particle blueprint incrementally, yielding a progress dict per stage.

    Each yield includes at least a "status" string; stage yields add the
    cumulative PLY preview path and a summary, and the final yield carries
    the session "state" dict needed by iter_meshify_session. The generator's
    return value is that same state dict.

    Raises:
        ValueError: when the prompt is empty.
    """
    prompt = (prompt or "").strip()
    if not prompt:
        raise ValueError("Enter a prompt first.")

    # Each session gets its own temp directory for previews and exports.
    out_dir = Path(tempfile.mkdtemp(prefix="particle_blueprint_session_"))
    yield {"status": "Parsing prompt and planning shape…", "stage_index": 0, "stage_count": 1, "session_dir": str(out_dir)}

    spec, parser_backend = _resolve_spec(prompt, parser_mode=parser_mode, model_id=model_id)
    stages = list(_iter_part_specs(spec, detail=detail))

    all_points = []
    all_normals = []
    all_labels = []

    for idx, (stage_name, points, normals, labels) in enumerate(stages, start=1):
        # Optional asymmetry: warp the +Y half of the major body parts.
        # NOTE(review): normals are not recomputed after this warp — they stay
        # those of the unwarped surface.
        if spec.asymmetry > 0 and stage_name in {"Hull", "Cockpit", "Cargo bay"}:
            mask = points[:, 1] > 0
            points = points.copy()
            points[mask, 2] += spec.asymmetry * np.sin(points[mask, 0] * 2.0)

        all_points.append(points)
        all_normals.append(normals)
        all_labels.append(labels)

        # Re-stack everything accumulated so far so each stage preview shows
        # the cumulative model, not just the new part.
        merged_points = np.vstack(all_points).astype(np.float32)
        merged_normals = np.vstack(all_normals).astype(np.float32)
        merged_labels = np.concatenate(all_labels).astype(np.int32)

        preview_path = str(out_dir / f"blueprint_stage_{idx:02d}.ply")
        export_point_cloud_as_ply(merged_points, merged_labels, preview_path)

        summary = {
            "prompt": prompt,
            "parser_backend": parser_backend,
            "spec": spec.to_dict(),
            "stage": stage_name,
            "stage_index": idx,
            "stage_count": len(stages),
            "point_count": int(len(merged_points)),
        }
        yield {
            "status": f"{stage_name} added ({idx}/{len(stages)})",
            "blueprint_path": preview_path,
            "summary": summary,
            "stage_index": idx,
            "stage_count": len(stages),
            "session_dir": str(out_dir),
        }

    final_points = np.vstack(all_points).astype(np.float32)
    final_normals = np.vstack(all_normals).astype(np.float32)
    final_labels = np.concatenate(all_labels).astype(np.int32)

    # Persist raw arrays so the mesh step can reload them without re-parsing.
    npz_path = str(out_dir / "blueprint_data.npz")
    np.savez_compressed(npz_path, points=final_points, normals=final_normals, labels=final_labels)

    final_ply = str(out_dir / "blueprint_final.ply")
    export_point_cloud_as_ply(final_points, final_labels, final_ply)

    # State handed to iter_meshify_session / stored in the Gradio session.
    state = {
        "prompt": prompt,
        "parser_backend": parser_backend,
        "spec": spec.to_dict(),
        "point_count": int(len(final_points)),
        "session_dir": str(out_dir),
        "npz_path": npz_path,
        "blueprint_path": final_ply,
        "target_model": TARGET_OMNI_MODEL,
    }
    yield {
        "status": "Blueprint ready. Inspect it, then run mesh generation when happy.",
        "blueprint_path": final_ply,
        "summary": {
            **state,
            "stage": "complete",
        },
        "stage_index": len(stages),
        "stage_count": len(stages),
        "state": state,
        "session_dir": str(out_dir),
    }
    return state
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def points_to_mesh(points: np.ndarray, pitch: float = 0.08, padding: int = 5, sigma: float = 1.2, level: float = 0.11) -> trimesh.Trimesh:
    """Convert a point cloud into a smoothed, cleaned surface mesh.

    Points are splatted into a voxel density grid, blurred with a Gaussian
    of width *sigma*, and surfaced with marching cubes at iso-level *level*.
    Vertices are mapped back into the original world coordinates.
    """
    lo = points.min(axis=0) - padding * pitch
    hi = points.max(axis=0) + padding * pitch
    grid_shape = np.ceil((hi - lo) / pitch).astype(int) + 1
    # Keep the grid bounded for memory/speed regardless of cloud extent.
    grid_shape = np.clip(grid_shape, 24, 192)

    density = np.zeros(tuple(grid_shape.tolist()), dtype=np.float32)
    voxel_idx = ((points - lo) / pitch).astype(int)
    voxel_idx = np.clip(voxel_idx, 0, grid_shape - 1)
    np.add.at(density, (voxel_idx[:, 0], voxel_idx[:, 1], voxel_idx[:, 2]), 1.0)

    density = ndimage.gaussian_filter(density, sigma=sigma)
    verts, faces, vert_normals, _ = measure.marching_cubes(density, level=level)
    # Back from voxel indices to world coordinates.
    verts = verts * pitch + lo

    mesh = trimesh.Trimesh(vertices=verts, faces=faces, vertex_normals=vert_normals, process=True)
    mesh.update_faces(mesh.nondegenerate_faces())
    mesh.update_faces(mesh.unique_faces())
    mesh.remove_unreferenced_vertices()
    # Best-effort cleanup: failures in hole filling or smoothing are non-fatal.
    try:
        mesh.fill_holes()
    except Exception:
        pass
    try:
        trimesh.smoothing.filter_humphrey(mesh, iterations=2)
    except Exception:
        pass
    return mesh
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def iter_meshify_session(
    state: dict,
    voxel_pitch: float = 0.08,
    use_target_model_cache: bool = True,
) -> Generator[dict, None, dict]:
    """Turn a stored blueprint session into a GLB mesh, yielding progress dicts.

    *state* must come from iter_blueprint_session (it needs "npz_path" and
    "session_dir"). The final yield carries "mesh_path", "mesh_file" and a
    full "summary"; the generator's return value is that summary.

    Raises:
        ValueError: when no blueprint state (or its npz file) is available.
    """
    if not state or not state.get("npz_path"):
        raise ValueError("Generate a blueprint first.")

    data = np.load(state["npz_path"])
    points = data["points"].astype(np.float32)
    # NOTE(review): labels are loaded but not used by the mesher below.
    labels = data["labels"].astype(np.int32)
    session_dir = Path(state["session_dir"])

    model_note = None
    if use_target_model_cache:
        # Warm the Hunyuan3D-Omni cache so later refinement work starts fast.
        yield {"status": f"Preparing target model cache for {TARGET_OMNI_MODEL}…"}
        model_cache = ensure_target_model_cached(TARGET_OMNI_MODEL)
        model_note = model_cache["message"]
        yield {"status": model_note}

    yield {"status": "Converting blueprint into a watertight mesh…"}
    mesh = points_to_mesh(points, pitch=voxel_pitch)

    yield {"status": "Exporting GLB…"}
    glb_path = str(session_dir / "mesh_final.glb")
    export_mesh_as_glb(mesh, glb_path)

    summary = {
        **state,
        "mesh_backend": "local_voxel_mesher",
        "target_model_cached": bool(use_target_model_cache),
        "target_model": TARGET_OMNI_MODEL,
        "target_model_note": model_note,
        "vertex_count": int(len(mesh.vertices)),
        "face_count": int(len(mesh.faces)),
        "bounds": mesh.bounds.round(3).tolist(),
        "voxel_pitch": voxel_pitch,
        "mesh_path": glb_path,
    }
    yield {
        "status": "Mesh ready.",
        "mesh_path": glb_path,
        "summary": summary,
        "mesh_file": glb_path,
    }
    return summary
| 384 |
+
|
| 385 |
+
|
| 386 |
+
# Backward-compatible helper for older single-click flow.
def run_pipeline(
    prompt: str,
    detail: int = 24,
    voxel_pitch: float = 0.08,
    parser_mode: str = "heuristic",
    model_id: str | None = None,
) -> BuildArtifacts:
    """Run blueprint then mesh generation end to end and return the artifacts.

    Drains both streaming generators, keeping the last blueprint path, state
    and summaries seen. Raises RuntimeError when the blueprint stage never
    produced a session state.
    """
    blueprint_state = None
    blueprint_summary = None
    ply_path = None
    for update in iter_blueprint_session(prompt, detail=detail, parser_mode=parser_mode, model_id=model_id):
        ply_path = update.get("blueprint_path", ply_path)
        blueprint_state = update.get("state", blueprint_state)
        blueprint_summary = update.get("summary", blueprint_summary)
    if blueprint_state is None:
        raise RuntimeError("Blueprint generation failed.")

    glb_path = None
    mesh_summary = None
    for update in iter_meshify_session(blueprint_state, voxel_pitch=voxel_pitch, use_target_model_cache=False):
        glb_path = update.get("mesh_path", glb_path)
        mesh_summary = update.get("summary", mesh_summary)

    final_summary = mesh_summary or blueprint_summary or {}
    return BuildArtifacts(ply_path=ply_path or "", glb_path=glb_path or "", summary=final_summary)
llm_parser.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
from functools import lru_cache
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from parser import PromptSpec, merge_prompt_specs, parse_prompt
|
| 10 |
+
|
| 11 |
+
# ZeroGPU decorator: fall back to a no-op shim when the `spaces` package is
# unavailable (e.g. when running locally, outside a Hugging Face Space).
try:
    import spaces  # type: ignore
except Exception:  # pragma: no cover
    class _SpacesShim:
        # Drop-in stand-in exposing a no-op `GPU` decorator factory.
        @staticmethod
        def GPU(*args, **kwargs):
            def decorator(fn):
                return fn
            return decorator

    spaces = _SpacesShim()  # type: ignore
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Default local planner model; override via the PB3D_LOCAL_MODEL env var.
DEFAULT_LOCAL_MODEL = os.getenv("PB3D_LOCAL_MODEL", "Qwen/Qwen2.5-1.5B-Instruct")
# Friendly-name -> repo-id choices surfaced in the UI.
MODEL_PRESETS = {
    "Qwen 2.5 1.5B": "Qwen/Qwen2.5-1.5B-Instruct",
    "SmolLM2 1.7B": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
}

# Schema hint embedded verbatim in the LLM prompt: keys name the fields the
# model must emit; values describe the allowed enums/ranges for each field.
JSON_SCHEMA_HINT = {
    "object_type": ["cargo_hauler", "fighter", "shuttle", "freighter", "dropship", "drone"],
    "scale": ["small", "medium", "large"],
    "hull_style": ["boxy", "rounded", "sleek"],
    "engine_count": "integer 1-6",
    "wing_span": "float 0.0-0.6",
    "cargo_ratio": "float 0.0-0.65",
    "cockpit_ratio": "float 0.10-0.30",
    "fin_height": "float 0.0-0.3",
    "landing_gear": "boolean",
    "asymmetry": "float 0.0-0.2",
    "notes": "short string",
}
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _clamp(value: float, low: float, high: float) -> float:
|
| 46 |
+
return max(low, min(high, value))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@lru_cache(maxsize=2)
def _load_generation_components(model_id: str):
    """Load and cache the (tokenizer, model) pair for *model_id*.

    Cached (at most two models) so repeated parses reuse weights. Heavy
    imports are deferred so this module stays importable without torch.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer
    import torch

    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    if tokenizer.pad_token is None:
        # Some instruct models ship without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token

    has_cuda = torch.cuda.is_available()
    # bfloat16 only when a GPU is available; CPU falls back to float32.
    torch_dtype = torch.bfloat16 if has_cuda else torch.float32
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch_dtype,
        device_map="auto",
        low_cpu_mem_usage=True,
        trust_remote_code=True,
    )
    return tokenizer, model
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@spaces.GPU(duration=45)
def _generate_structured_json(prompt: str, model_id: str) -> dict[str, Any]:
    """Ask the local instruct model to emit a design-spec JSON object.

    Returns the first "{...}" span found in the generation, parsed as JSON.

    Raises:
        ValueError: when the model output contains no JSON object.
        json.JSONDecodeError: when the extracted span is not valid JSON.
    """
    import torch

    tokenizer, model = _load_generation_components(model_id)

    system = (
        "You are a compact design parser for a procedural 3D generator. "
        "Convert the user request into a single JSON object and output JSON only."
    )
    user = (
        "Return a JSON object using this schema: "
        f"{json.dumps(JSON_SCHEMA_HINT)}\n"
        "Rules: choose the closest allowed enum values, stay conservative, infer hard-surface sci-fi vehicle structure, "
        "never explain anything, never use markdown fences, and keep notes brief.\n"
        f"Prompt: {prompt}"
    )

    messages = [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]

    # Prefer the model's own chat template; fall back to a plain transcript.
    if hasattr(tokenizer, "apply_chat_template"):
        rendered = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    else:
        rendered = f"System: {system}\nUser: {user}\nAssistant:"

    inputs = tokenizer(rendered, return_tensors="pt")
    model_device = getattr(model, "device", None)
    if model_device is not None:
        inputs = {k: v.to(model_device) for k, v in inputs.items()}

    with torch.no_grad():
        # Greedy decoding; temperature/top_p are passed explicitly as None to
        # silence sampling-parameter warnings when do_sample is False.
        output = model.generate(
            **inputs,
            max_new_tokens=220,
            do_sample=False,
            temperature=None,
            top_p=None,
            repetition_penalty=1.02,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    text = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    # Grab the outermost {...} span in the reply (DOTALL for multi-line JSON).
    match = re.search(r"\{.*\}", text, flags=re.S)
    if not match:
        raise ValueError("Local model did not return JSON.")
    return json.loads(match.group(0))
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def _normalize_llm_payload(payload: dict[str, Any], original_prompt: str) -> PromptSpec:
    """Coerce a raw JSON payload from the LLM into a validated PromptSpec.

    Every field is defensively converted and clamped, so malformed model
    output can never yield an out-of-range or wrongly-typed spec.
    """

    def as_str(name: str, default: str) -> str:
        # Lower-cased, stripped; empty strings fall back to the default.
        text = str(payload.get(name, default)).strip().lower()
        return text if text else default

    def as_int(name: str, default: int, low: int, high: int) -> int:
        try:
            return int(_clamp(int(payload.get(name, default)), low, high))
        except Exception:
            # Non-numeric junk from the model falls back to the default.
            return default

    def as_float(name: str, default: float, low: float, high: float) -> float:
        try:
            return float(_clamp(float(payload.get(name, default)), low, high))
        except Exception:
            return default

    raw_gear = payload.get("landing_gear", True)
    if isinstance(raw_gear, bool):
        has_gear = raw_gear
    else:
        # Accept common truthy spellings ("1", "true", "yes", "y").
        has_gear = str(raw_gear).strip().lower() in {"1", "true", "yes", "y"}

    return PromptSpec(
        object_type=as_str("object_type", "cargo_hauler"),
        scale=as_str("scale", "small"),
        hull_style=as_str("hull_style", "boxy"),
        engine_count=as_int("engine_count", 2, 1, 6),
        wing_span=as_float("wing_span", 0.2, 0.0, 0.6),
        cargo_ratio=as_float("cargo_ratio", 0.38, 0.0, 0.65),
        cockpit_ratio=as_float("cockpit_ratio", 0.18, 0.10, 0.30),
        fin_height=as_float("fin_height", 0.0, 0.0, 0.3),
        landing_gear=has_gear,
        asymmetry=as_float("asymmetry", 0.0, 0.0, 0.2),
        notes=str(payload.get("notes", original_prompt)).strip() or original_prompt,
    )
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def parse_prompt_with_local_llm(prompt: str, model_id: str | None = None) -> PromptSpec:
    """Parse *prompt* with the local LLM, blended with the keyword heuristic.

    The heuristic result acts as the base and the LLM's structured output is
    overlaid via merge_prompt_specs, so weak model output degrades gracefully
    instead of breaking the pipeline.
    """
    chosen_model = model_id or DEFAULT_LOCAL_MODEL
    baseline = parse_prompt(prompt)
    raw_payload = _generate_structured_json(prompt=prompt, model_id=chosen_model)
    refined = _normalize_llm_payload(raw_payload, original_prompt=prompt)
    return merge_prompt_specs(baseline, refined)
|
model_runtime.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Callable
|
| 6 |
+
|
| 7 |
+
from huggingface_hub import snapshot_download
|
| 8 |
+
|
| 9 |
+
# Upstream repo id for the planned Hunyuan3D-Omni integration; overridable via env.
TARGET_OMNI_MODEL = os.getenv("PB3D_TARGET_MODEL", "tencent/Hunyuan3D-Omni")
# Root directory on the Space filesystem where model snapshots are cached.
DEFAULT_CACHE_ROOT = Path(os.getenv("PB3D_MODEL_CACHE", "./models"))
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_target_model_dir(model_id: str = TARGET_OMNI_MODEL) -> Path:
    """Return the local cache directory for *model_id*.

    Slashes in the repo id are replaced with "--" so the id maps onto a
    single filesystem-safe folder name under DEFAULT_CACHE_ROOT.
    """
    folder_name = model_id.replace("/", "--")
    return DEFAULT_CACHE_ROOT / folder_name
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def ensure_target_model_cached(
    model_id: str = TARGET_OMNI_MODEL,
    progress: Callable[[str], None] | None = None,
) -> dict:
    """
    Best-effort local cache of the upstream target model repo.

    Fetches the model repo snapshot into the Space filesystem so later
    integration work can call the upstream inference stack directly. It does
    not assume internal file names beyond the official repo id, and it never
    raises: failures are reported through the returned status dict.

    Args:
        model_id: Hugging Face repo id to cache.
        progress: Optional callback receiving short human-readable status lines.

    Returns:
        Dict with keys ``ok`` (bool), ``model_id``, ``local_path`` and ``message``.
    """
    target_dir = get_target_model_dir(model_id)
    target_dir.mkdir(parents=True, exist_ok=True)

    if progress:
        progress(f"Checking local cache for {model_id}…")

    try:
        # `resume_download` and `local_dir_use_symlinks` were dropped: both are
        # deprecated no-ops in the pinned huggingface_hub>=0.34 (resuming and
        # real-file local dirs are the default behaviour there).
        local_path = snapshot_download(
            repo_id=model_id,
            local_dir=str(target_dir),
        )
        return {
            "ok": True,
            "model_id": model_id,
            "local_path": local_path,
            "message": f"Cached {model_id} in {local_path}",
        }
    except Exception as exc:  # pragma: no cover - network/permission failures
        # Best-effort by design: report the failure instead of crashing the app.
        return {
            "ok": False,
            "model_id": model_id,
            "local_path": str(target_dir),
            "message": f"Could not cache {model_id}: {exc}",
        }
|
packages.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
git
|
| 2 |
+
ffmpeg
|
| 3 |
+
libgl1
|
| 4 |
+
libglib2.0-0
|
parser.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
from dataclasses import dataclass, asdict
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@dataclass
class PromptSpec:
    """Structured design parameters distilled from a free-form user prompt.

    Ratio fields are fractions of the overall hull; the value ranges noted
    below are the ones enforced by `merge_prompt_specs`.
    """

    object_type: str = "cargo_hauler"  # one of VALID_OBJECT_TYPES
    scale: str = "small"  # one of VALID_SCALES
    hull_style: str = "boxy"  # one of VALID_HULL_STYLES
    engine_count: int = 2  # clamped to 1..6 downstream
    wing_span: float = 0.2  # 0.0 (wingless) .. 0.6
    cargo_ratio: float = 0.38  # 0.0 .. 0.65
    cockpit_ratio: float = 0.18  # 0.10 .. 0.30
    fin_height: float = 0.0  # 0.0 (no fin) .. 0.3
    landing_gear: bool = True  # False for hover craft
    asymmetry: float = 0.0  # 0.0 (symmetric) .. 0.2
    notes: str = ""  # free-text summary, usually the original prompt

    def to_dict(self) -> dict:
        """Return a plain-dict copy of all fields (via dataclasses.asdict)."""
        return asdict(self)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Prompt substring -> canonical object type. Scanned in insertion order with
# first-match-wins semantics (see parse_prompt), so synonyms collapse to one type.
TYPE_KEYWORDS = {
    "fighter": "fighter",
    "combat": "fighter",
    "interceptor": "fighter",
    "shuttle": "shuttle",
    "freighter": "freighter",
    "hauler": "cargo_hauler",
    "cargo": "cargo_hauler",
    "transport": "cargo_hauler",
    "dropship": "dropship",
    "drone": "drone",
}

# Prompt substring -> hull style bucket.
STYLE_KEYWORDS = {
    "boxy": "boxy",
    "industrial": "boxy",
    "hard-surface": "boxy",
    "rounded": "rounded",
    "sleek": "sleek",
    "streamlined": "sleek",
    "brutalist": "boxy",
}

# Prompt substring -> coarse size class.
SCALE_KEYWORDS = {
    "tiny": "small",
    "small": "small",
    "compact": "small",
    "medium": "medium",
    "mid-size": "medium",
    "large": "large",
    "heavy": "large",
    "huge": "large",
}

# Closed vocabularies used to validate enum-like values coming from the LLM
# parser (see merge_prompt_specs / _normalize_llm_payload).
VALID_OBJECT_TYPES = {"cargo_hauler", "fighter", "shuttle", "freighter", "dropship", "drone"}
VALID_SCALES = {"small", "medium", "large"}
VALID_HULL_STYLES = {"boxy", "rounded", "sleek"}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _clamp(value: float, low: float, high: float) -> float:
|
| 65 |
+
return max(low, min(high, value))
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def merge_prompt_specs(primary: PromptSpec, secondary: PromptSpec) -> PromptSpec:
    """Overlay *secondary* onto a copy of *primary*, keeping values sane.

    Enum-like fields are taken from *secondary* only when they belong to the
    allowed vocabularies; numeric fields are always taken but clamped to the
    design envelope; finally fighter/drone hulls are nudged toward sleek,
    low-cargo shapes.
    """
    merged = PromptSpec(**primary.to_dict())

    # Enum-like fields: adopt secondary's value only if it is a known option.
    for field_name, allowed in (
        ("object_type", VALID_OBJECT_TYPES),
        ("scale", VALID_SCALES),
        ("hull_style", VALID_HULL_STYLES),
    ):
        candidate = getattr(secondary, field_name)
        if candidate in allowed:
            setattr(merged, field_name, candidate)

    # Numeric/boolean fields: always prefer secondary, clamped to safe ranges.
    merged.engine_count = int(_clamp(secondary.engine_count, 1, 6))
    merged.wing_span = float(_clamp(secondary.wing_span, 0.0, 0.6))
    merged.cargo_ratio = float(_clamp(secondary.cargo_ratio, 0.0, 0.65))
    merged.cockpit_ratio = float(_clamp(secondary.cockpit_ratio, 0.10, 0.30))
    merged.fin_height = float(_clamp(secondary.fin_height, 0.0, 0.3))
    merged.landing_gear = bool(secondary.landing_gear)
    merged.asymmetry = float(_clamp(secondary.asymmetry, 0.0, 0.2))
    merged.notes = secondary.notes or primary.notes

    # Combat craft stay lean: cap cargo volume and avoid boxy hulls.
    if merged.object_type in {"fighter", "drone"}:
        merged.cargo_ratio = min(merged.cargo_ratio, 0.20)
        if merged.hull_style == "boxy":
            merged.hull_style = "sleek"

    return merged
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def parse_prompt(prompt: str) -> PromptSpec:
    """Heuristic keyword parser: map a free-form prompt onto a PromptSpec.

    Works purely on substring matches, so it is fast and dependency-free;
    the LLM-backed parser can refine the result via merge_prompt_specs.
    """
    lowered = prompt.lower().strip()
    spec = PromptSpec(notes=prompt.strip())

    def first_match(mapping: dict) -> str | None:
        # First matching keyword wins (dict insertion order).
        return next((value for key, value in mapping.items() if key in lowered), None)

    def mentions(*phrases: str) -> bool:
        return any(phrase in lowered for phrase in phrases)

    matched_type = first_match(TYPE_KEYWORDS)
    if matched_type is not None:
        spec.object_type = matched_type
    matched_style = first_match(STYLE_KEYWORDS)
    if matched_style is not None:
        spec.hull_style = matched_style
    matched_scale = first_match(SCALE_KEYWORDS)
    if matched_scale is not None:
        spec.scale = matched_scale

    # Wings: the explicit "no wings"/"wingless" check runs second on purpose
    # so it overrides the generic "wing" match.
    if mentions("wing", "wings"):
        spec.wing_span = 0.42 if spec.object_type == "fighter" else 0.28
    if mentions("no wings", "wingless"):
        spec.wing_span = 0.0

    if mentions("cargo bay", "cargo hold", "container", "freight"):
        spec.cargo_ratio = 0.48

    if mentions("big cockpit", "large cockpit", "glass nose"):
        spec.cockpit_ratio = 0.24
    if mentions("small cockpit", "tiny cockpit"):
        spec.cockpit_ratio = 0.13

    if mentions("fin", "tail", "vertical stabilizer"):
        spec.fin_height = 0.18 if spec.object_type != "fighter" else 0.12

    # Hover craft sit on thrust rather than gear.
    if mentions("hover", "hovercraft", "antigrav"):
        spec.landing_gear = False

    if spec.object_type in {"fighter", "drone"}:
        spec.engine_count = 1 if "single engine" in lowered else 2
        spec.cargo_ratio = min(spec.cargo_ratio, 0.18)
        spec.hull_style = "sleek"
    elif spec.object_type in {"cargo_hauler", "freighter", "dropship"}:
        spec.engine_count = 4 if mentions("4 engine", "four engine", "quad engine") else 2
        if spec.hull_style == "sleek":
            spec.hull_style = "boxy"

    # An explicit count like "3 engines" beats the type-based defaults above.
    engine_match = re.search(r"(\d+)\s*(?:engine|engines)", lowered)
    if engine_match:
        spec.engine_count = max(1, min(6, int(engine_match.group(1))))

    if mentions("asymmetric", "uneven", "offset"):
        spec.asymmetry = 0.12

    return spec
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=5.22.0
|
| 2 |
+
numpy>=1.26.0
|
| 3 |
+
trimesh[easy]>=4.5.0
|
| 4 |
+
scipy>=1.13.0
|
| 5 |
+
scikit-image>=0.24.0
|
| 6 |
+
huggingface_hub>=0.34.0
|
| 7 |
+
requests>=2.32.0
|
| 8 |
+
transformers>=4.47.0
|
| 9 |
+
pillow>=10.4.0
|
| 10 |
+
torch>=2.5.0,<2.6.0
|
| 11 |
+
torchvision>=0.20.0,<0.21.0
|
| 12 |
+
torchaudio>=2.5.0,<2.6.0
|
viewer.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Iterable
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import trimesh
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _sample_points(points: np.ndarray, max_points: int = 3500) -> np.ndarray:
|
| 12 |
+
if len(points) <= max_points:
|
| 13 |
+
return points.astype(float)
|
| 14 |
+
idx = np.linspace(0, len(points) - 1, max_points).astype(int)
|
| 15 |
+
return points[idx].astype(float)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def load_points_from_cloud_file(path: str | Path, max_points: int = 3500) -> np.ndarray:
    """Extract up to *max_points* XYZ samples from a point-cloud file."""
    loaded = trimesh.load(path, force="mesh")
    if isinstance(loaded, trimesh.points.PointCloud):
        raw = np.asarray(loaded.vertices)
    elif isinstance(loaded, trimesh.Trimesh):
        if len(loaded.faces) > 0:
            # Sample the surface so density does not depend on tessellation.
            sample_count = min(max_points * 2, max(1200, len(loaded.faces) * 3))
            raw = loaded.sample(sample_count)
        else:
            raw = np.asarray(loaded.vertices)
    else:
        # Unknown geometry type: fall back to whatever vertex array it exposes.
        raw = np.asarray(getattr(loaded, "vertices", []))
    return _sample_points(np.asarray(raw, dtype=float), max_points=max_points)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def load_points_from_mesh_file(path: str | Path, max_points: int = 3500) -> np.ndarray:
    """Extract up to *max_points* XYZ surface samples from a mesh file."""
    geometry = trimesh.load(path, force="mesh")
    if isinstance(geometry, trimesh.Scene):
        # Flatten multi-part scenes into one mesh before sampling.
        parts = [g for g in geometry.geometry.values() if isinstance(g, trimesh.Trimesh)]
        geometry = trimesh.util.concatenate(parts)
    if isinstance(geometry, trimesh.Trimesh):
        if len(geometry.faces) > 0:
            sample_count = min(max_points * 2, max(1600, len(geometry.faces) * 2))
            raw = geometry.sample(sample_count)
        else:
            raw = np.asarray(geometry.vertices)
    else:
        # Unknown geometry type: fall back to whatever vertex array it exposes.
        raw = np.asarray(getattr(geometry, "vertices", []))
    return _sample_points(np.asarray(raw, dtype=float), max_points=max_points)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def empty_viewer_html(message: str = "Generate a blueprint to preview it here.") -> str:
    """Return placeholder panel HTML shown in place of the 3D viewer.

    Args:
        message: Short status text rendered inside the styled panel.
    """
    return f"""
<div style='height:520px;border-radius:20px;border:1px solid rgba(255,255,255,.08);display:flex;align-items:center;justify-content:center;background:linear-gradient(180deg,#06070a,#0b1020);color:#cfd6ff;font-family:Inter,system-ui,sans-serif;'>
<div style='text-align:center;padding:18px 24px;max-width:420px;'>
<div style='font-size:1.12rem;font-weight:700;margin-bottom:6px;'>Blueprint Viewer</div>
<div style='opacity:.84;line-height:1.45'>{message}</div>
</div>
</div>
"""
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def point_cloud_viewer_html(points: np.ndarray, status: str = "Blueprint") -> str:
    """Build a self-contained three.js HTML viewer for a point cloud.

    Points are centered and scaled into a unit-ish cube, colored by normalized
    position, and rendered with touch-friendly OrbitControls.

    Args:
        points: (N, 3) array of XYZ positions; empty input yields a placeholder.
        status: Label shown in the overlay badge.

    Returns:
        An HTML fragment string (markup + inline script).
    """
    points = np.asarray(points, dtype=float)
    if points.size == 0:
        return empty_viewer_html("No points to display yet.")

    points = _sample_points(points)
    mins = points.min(axis=0)
    maxs = points.max(axis=0)
    center = (mins + maxs) / 2.0
    # Guard against a degenerate zero-extent cloud (span would be 0).
    span = float(np.max(maxs - mins)) or 1.0
    normalized = (points - center) / span
    color_src = normalized - normalized.min(axis=0, keepdims=True)
    # Use np.ptp(): the ndarray.ptp() *method* was removed in NumPy 2.0, which
    # the `numpy>=1.26` requirement can resolve to.
    denom = np.ptp(color_src, axis=0, keepdims=True)
    denom[denom == 0] = 1.0
    colors = np.clip(color_src / denom, 0.0, 1.0)

    points_payload = np.round(normalized, 5).tolist()
    colors_payload = np.round(colors, 5).tolist()

    # NOTE(review): three.js stopped shipping the non-module examples/js builds
    # around r148, so the OrbitControls URL below for 0.160.0 likely 404s and
    # triggers the "Viewer failed to load." fallback. Verify the CDN paths or
    # migrate to ES-module imports.
    return f"""
<div style="height:560px;border-radius:20px;overflow:hidden;border:1px solid rgba(255,255,255,.08);background:radial-gradient(circle at 50% 20%, #0f1630, #05070d 75%);position:relative;">
<div id="pb3d-label" style="position:absolute;top:12px;left:12px;z-index:2;background:rgba(16,21,40,.75);border:1px solid rgba(255,255,255,.08);backdrop-filter:blur(8px);padding:10px 12px;border-radius:14px;color:#eef2ff;font:600 14px/1.3 Inter,system-ui,sans-serif;">{status}<div style="opacity:.75;font-weight:500;margin-top:4px">One finger orbit • two fingers pan/zoom</div></div>
<canvas id="pb3d-canvas" style="width:100%;height:100%;display:block;touch-action:none"></canvas>
</div>
<script src="https://unpkg.com/three@0.160.0/build/three.min.js"></script>
<script src="https://unpkg.com/three@0.160.0/examples/js/controls/OrbitControls.js"></script>
<script>
(() => {{
  // Look the canvas up by id: currentScript.previousElementSibling would
  // select the preceding <script> tag, not the canvas element.
  const canvas = document.getElementById("pb3d-canvas");
  const holder = canvas.parentElement;
  if (!window.THREE || !window.THREE.OrbitControls) {{
    holder.innerHTML = `<div style='height:100%;display:flex;align-items:center;justify-content:center;color:#eef2ff;font-family:Inter,system-ui,sans-serif;'>Viewer failed to load.</div>`;
    return;
  }}
  const pts = {json.dumps(points_payload)};
  const cols = {json.dumps(colors_payload)};
  const THREE = window.THREE;
  const scene = new THREE.Scene();
  const camera = new THREE.PerspectiveCamera(50, holder.clientWidth / holder.clientHeight, 0.01, 100);
  camera.position.set(1.8, 1.4, 2.2);

  const renderer = new THREE.WebGLRenderer({{canvas, antialias:true, alpha:true}});
  renderer.setPixelRatio(Math.min(window.devicePixelRatio || 1, 2));
  renderer.setSize(holder.clientWidth, holder.clientHeight, false);
  renderer.outputColorSpace = THREE.SRGBColorSpace;

  const controls = new THREE.OrbitControls(camera, canvas);
  controls.enableDamping = true;
  controls.enablePan = true;
  controls.minDistance = 0.4;
  controls.maxDistance = 8;
  controls.target.set(0,0,0);

  const positions = new Float32Array(pts.length * 3);
  const colors = new Float32Array(cols.length * 3);
  for (let i = 0; i < pts.length; i++) {{
    positions[i*3] = pts[i][0];
    positions[i*3+1] = pts[i][2];
    positions[i*3+2] = pts[i][1];
    colors[i*3] = cols[i][0] * 0.85 + 0.15;
    colors[i*3+1] = cols[i][1] * 0.85 + 0.15;
    colors[i*3+2] = cols[i][2] * 0.85 + 0.15;
  }}
  const geometry = new THREE.BufferGeometry();
  geometry.setAttribute('position', new THREE.BufferAttribute(positions, 3));
  geometry.setAttribute('color', new THREE.BufferAttribute(colors, 3));
  const material = new THREE.PointsMaterial({{size: 0.025, sizeAttenuation: true, vertexColors:true}});
  const cloud = new THREE.Points(geometry, material);
  scene.add(cloud);

  const grid = new THREE.GridHelper(2.4, 12, 0x4c5cff, 0x1d2645);
  grid.position.y = -0.72;
  scene.add(grid);

  const lightA = new THREE.DirectionalLight(0xffffff, 1.8);
  lightA.position.set(2, 3, 2);
  scene.add(lightA);
  const lightB = new THREE.DirectionalLight(0x7795ff, 0.9);
  lightB.position.set(-2, -1, -1.5);
  scene.add(lightB);
  scene.add(new THREE.AmbientLight(0xb8c8ff, 0.8));

  function resize() {{
    const w = holder.clientWidth;
    const h = holder.clientHeight;
    camera.aspect = w / h;
    camera.updateProjectionMatrix();
    renderer.setSize(w, h, false);
  }}
  window.addEventListener('resize', resize);

  function tick() {{
    controls.update();
    renderer.render(scene, camera);
    requestAnimationFrame(tick);
  }}
  tick();
}})();
</script>
"""
|