#!/bin/bash
# Dataset-Prep v3 — Base setup (extract, CLIP, bodycrop, clean)
# Fail fast: -e aborts on the first unhandled error; pipefail makes the
# `pip … | tail` install pipelines below report pip's failure instead of
# tail's success.
set -eo pipefail
echo "=== Dataset-Prep v3: Base Setup ==="
# System deps
apt-get update -qq && apt-get install -y -qq curl git ffmpeg libgl1 libglib2.0-0 bc > /dev/null 2>&1
pip install -q --upgrade pip
pip install -q uv 2>&1 | tail -1
# Auto-upgrade PyTorch for Blackwell GPUs (sm_120+)
# Query nvidia-smi directly (no Python subprocess wrapper needed) and keep
# only the first line: multi-GPU hosts emit one compute_cap per device,
# which would break the bc comparison below.
GPU_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader,nounits 2>/dev/null | head -n1 || echo "0")
GPU_ARCH=${GPU_ARCH:-0}   # nvidia-smi absent/empty output -> treat as no GPU
if [ "$(echo "$GPU_ARCH >= 12.0" | bc 2>/dev/null)" = "1" ]; then
  echo "Blackwell GPU detected (sm_${GPU_ARCH}) — upgrading PyTorch to cu128..."
  pip install --upgrade torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128 -q 2>/dev/null || true
fi
# Backend framework
echo "Installing backend deps..."
# Package lists kept in arrays so each group is easy to read and extend;
# quoted "${arr[@]}" expansion preserves 'uvicorn[standard]' literally.
backend_deps=(
  fastapi 'uvicorn[standard]' websockets httpx pydantic
  pillow opencv-python-headless numpy huggingface-hub python-multipart
)
uv pip install --system -q "${backend_deps[@]}" 2>&1 | tail -5
echo " backend deps done."
# Processing deps (all lightweight — extract, CLIP, bodycrop, clean)
echo "Installing processing deps..."
processing_deps=(
  imagehash
  open-clip-torch transformers accelerate safetensors
  nudenet mediapipe onnxruntime-gpu
)
uv pip install --system -q "${processing_deps[@]}" 2>&1 | tail -5
echo " processing deps done."
# Pre-download ALL light ML models so they're cached and ready
echo "Pre-downloading ML models..."
# Heredoc delimiter is quoted ('PYEOF'), so the Python source is passed
# verbatim with no shell expansion inside it.
python3 << 'PYEOF'
from transformers import AutoModel, AutoProcessor
# Each from_pretrained call below populates the local Hugging Face cache,
# so the backend can later load these models without network access.
# SigLIP — default CLIP model
print(" Downloading SigLIP (ViT-SO400M)...")
AutoProcessor.from_pretrained("google/siglip-so400m-patch14-384")
AutoModel.from_pretrained("google/siglip-so400m-patch14-384")
print(" OK: SigLIP")
# CLIP ViT-L
print(" Downloading CLIP ViT-L/14...")
AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
AutoModel.from_pretrained("openai/clip-vit-large-patch14")
print(" OK: CLIP ViT-L")
# NSFW detector
print(" Downloading NSFW detector...")
from transformers import pipeline as hf_pipeline
# Building the pipeline forces the model + config download into the cache.
hf_pipeline("image-classification", model="Falconsai/nsfw_image_detection")
print(" OK: NSFW detector")
# NudeNet
print(" Downloading NudeNet...")
from nudenet import NudeDetector
# Instantiating the detector triggers its ONNX weights download.
NudeDetector()
print(" OK: NudeNet")
print("All models cached.")
PYEOF
# Download backend code from HF
echo "Downloading backend code..."
mkdir -p /workspace/dataset-prep-v3
cd /workspace/dataset-prep-v3
python3 -c "
from huggingface_hub import hf_hub_download
import tarfile
path = hf_hub_download('msrcam/ds-prep-backend', 'backend.tar.gz', repo_type='dataset')
with tarfile.open(path) as t:
    # filter='data' rejects path-traversal / symlink / device entries in the
    # downloaded archive (tarfile extraction filters, Python 3.12+ and
    # backports). Older interpreters raise TypeError -> legacy behavior.
    try:
        t.extractall('/workspace/dataset-prep-v3', filter='data')
    except TypeError:
        t.extractall('/workspace/dataset-prep-v3')
print('Backend code ready.')
" || echo "WARNING: Failed to download backend from HF"
mkdir -p /workspace/dataset-prep-v3/data/settings /workspace/dataset-prep-v3/data/presets
# Write HF config for private repo access.
# Read the token from the environment inside Python rather than
# interpolating it into the -c source: a token containing quotes or
# backslashes would otherwise break (or inject into) the generated code.
# ${HF_TOKEN:-} keeps the test safe even under `set -u`.
if [ -n "${HF_TOKEN:-}" ]; then
  python3 -c "import json, os; json.dump({'repo': 'msrcam/shared-datasets', 'token': os.environ['HF_TOKEN']}, open('/workspace/dataset-prep-v3/data/hf_config.json', 'w'))"
  echo "HF token configured."
fi
# Start backend
cd /workspace/dataset-prep-v3
# Use python3 explicitly: bare `python` may not exist on minimal images,
# and every other interpreter call in this script uses python3.
nohup python3 -m uvicorn backend.main:app --host 0.0.0.0 --port 7870 > /tmp/ds-v3.log 2>&1 &
echo $! > /tmp/ds-health.pid
echo "=== Backend running on port 7870 ==="