#!/bin/bash
# Dataset-Prep v3 bootstrap: install system + Python dependencies, pre-cache
# ML models, fetch the backend bundle, and launch the API server on :7870.
#
# pipefail added: several installs below are piped through `tail` to trim
# output; without pipefail the pipeline's status is tail's (always 0) and a
# failed install would go unnoticed despite `set -e`.
set -eo pipefail

echo "=== Dataset-Prep v3: Base Setup ==="

# System packages: ffmpeg + libgl1 + libglib2.0-0 for opencv/media handling,
# bc for the GPU compute-capability comparison further down.
apt-get update -qq && apt-get install -y -qq curl git ffmpeg libgl1 libglib2.0-0 bc > /dev/null 2>&1
pip install -q --upgrade pip
pip install -q uv 2>&1 | tail -1  # uv: fast installer used for the heavy dependency groups below
|
|
| |
# Detect the GPU compute capability; on Blackwell-class GPUs (compute cap
# >= 12.0) the preinstalled torch wheel lacks kernels, so swap in the cu128
# build. `splitlines()[0]` takes the first GPU only: nvidia-smi emits one line
# per GPU, and a multi-GPU box would otherwise feed several lines to bc and
# break the comparison. Any failure (no nvidia-smi, no GPU, empty output)
# falls back to "0", which makes the bc test false.
GPU_ARCH=$(python3 -c "import subprocess; o=subprocess.check_output(['nvidia-smi','--query-gpu=compute_cap','--format=csv,noheader,nounits'],text=True).strip(); print(o.splitlines()[0])" 2>/dev/null || echo "0")
if [ "$(echo "$GPU_ARCH >= 12.0" | bc 2>/dev/null)" = "1" ]; then
  echo "Blackwell GPU detected (sm_${GPU_ARCH}) — upgrading PyTorch to cu128..."
  # Best-effort: a failed upgrade leaves the preinstalled torch in place.
  pip install --upgrade torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128 -q 2>/dev/null || true
fi
|
|
| |
# --- Backend (API server) dependencies ---------------------------------------
echo "Installing backend deps..."
backend_pkgs=(
  fastapi 'uvicorn[standard]' websockets httpx pydantic
  pillow opencv-python-headless numpy huggingface-hub python-multipart
)
# tail -5 keeps only the last lines of installer output to avoid log spam.
uv pip install --system -q "${backend_pkgs[@]}" 2>&1 | tail -5
echo " backend deps done."
|
|
| |
# --- Image/ML processing dependencies ----------------------------------------
echo "Installing processing deps..."
processing_pkgs=(
  imagehash
  open-clip-torch transformers accelerate safetensors
  nudenet mediapipe onnxruntime-gpu
)
# tail -5 keeps only the last lines of installer output to avoid log spam.
uv pip install --system -q "${processing_pkgs[@]}" 2>&1 | tail -5
echo " processing deps done."
|
|
| |
# Pre-fetch all model weights into the local caches so the first API request
# doesn't pay the download cost. Heredoc delimiter is quoted ('PYEOF') so the
# Python source is passed through without shell expansion.
echo "Pre-downloading ML models..."
python3 << 'PYEOF'
from transformers import AutoModel, AutoProcessor

# Embedding models: cache processor + weights for each repo.
for title, tag, repo in (
    ("SigLIP (ViT-SO400M)", "SigLIP", "google/siglip-so400m-patch14-384"),
    ("CLIP ViT-L/14", "CLIP ViT-L", "openai/clip-vit-large-patch14"),
):
    print(f" Downloading {title}...")
    AutoProcessor.from_pretrained(repo)
    AutoModel.from_pretrained(repo)
    print(f" OK: {tag}")

print(" Downloading NSFW detector...")
from transformers import pipeline as hf_pipeline
hf_pipeline("image-classification", model="Falconsai/nsfw_image_detection")
print(" OK: NSFW detector")

print(" Downloading NudeNet...")
from nudenet import NudeDetector
NudeDetector()  # constructing the detector pulls its ONNX weights on first use
print(" OK: NudeNet")

print("All models cached.")
PYEOF
|
|
| |
# Fetch the backend source bundle from the HF dataset repo and unpack it.
# Failure is non-fatal (warning only) so the rest of setup still completes.
echo "Downloading backend code..."
mkdir -p /workspace/dataset-prep-v3
cd /workspace/dataset-prep-v3
python3 -c "
from huggingface_hub import hf_hub_download
import tarfile
path = hf_hub_download('msrcam/ds-prep-backend', 'backend.tar.gz', repo_type='dataset')
with tarfile.open(path) as t:
    try:
        # filter='data' rejects absolute paths / '..' traversal members
        # (CVE-2007-4559) instead of extracting them blindly.
        t.extractall('/workspace/dataset-prep-v3', filter='data')
    except TypeError:
        # Older Python without the 'filter' argument — original unfiltered behavior.
        t.extractall('/workspace/dataset-prep-v3')
print('Backend code ready.')
" || echo "WARNING: Failed to download backend from HF"
|
|
# Runtime data directories used by the backend.
mkdir -p /workspace/dataset-prep-v3/data/settings /workspace/dataset-prep-v3/data/presets

# Persist the HF upload config when a token is provided. The token is handed
# to Python through the environment instead of being interpolated into the
# -c source string: a token containing a quote character would otherwise
# break the Python syntax (or inject arbitrary code). ${HF_TOKEN:-} keeps the
# test safe even under `set -u`.
if [ -n "${HF_TOKEN:-}" ]; then
  HF_TOKEN="$HF_TOKEN" python3 -c "import json, os; json.dump({'repo': 'msrcam/shared-datasets', 'token': os.environ['HF_TOKEN']}, open('/workspace/dataset-prep-v3/data/hf_config.json', 'w'))"
  echo "HF token configured."
fi
|
|
| |
# Launch the API server in the background, logging to /tmp and recording the
# PID so a health-check / teardown script can manage the process.
cd /workspace/dataset-prep-v3
# python3, not bare `python`: consistent with the rest of this script and safe
# on images where `python` is absent or points at Python 2.
nohup python3 -m uvicorn backend.main:app --host 0.0.0.0 --port 7870 > /tmp/ds-v3.log 2>&1 &
echo $! > /tmp/ds-health.pid
echo "=== Backend running on port 7870 ==="
|
|