# syntax=docker/dockerfile:1
# Hugging Face Space (Docker) — GenSearcher + FireRed
# Requires GPU. For multi-GPU full-local mode, set START_VLLM_*=1 and CUDA device envs in README.
FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime

# Build-time only: keep apt non-interactive without polluting the runtime env.
# (A persistent `ENV DEBIAN_FRONTEND=noninteractive` leaks into the running
# container and is a documented anti-pattern; an ARG is visible to RUN steps
# in this stage but absent from the final image's environment.)
ARG DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
      curl \
      git \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Dependency inputs first (vendored rllm + manifest) so the expensive pip
# layer below stays cached when only application code changes.
COPY vendor/rllm /app/vendor/rllm
COPY requirements.txt /app/requirements.txt

RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir -e /app/vendor/rllm \
    && pip install --no-cache-dir -r /app/requirements.txt

# Optional: local vLLM inside the image (large). Disable with build-arg if you only use external APIs.
ARG INSTALL_VLLM=1
RUN if [ "$INSTALL_VLLM" = "1" ]; then pip install --no-cache-dir "vllm>=0.6.3"; fi

# Application code last — it changes most often, and placing it after the
# dependency layers means source edits no longer invalidate the pip caches.
COPY app.py space_gen.py space_health.py /app/
COPY services /app/services
COPY scripts /app/scripts
COPY docker_entry.py /app/docker_entry.py

# Strip BOM + all CR so bash never sees \r (avoids "exec format error" / stray $'\r' errors).
RUN python3 -c "import pathlib; p=pathlib.Path('/app/scripts/entrypoint.sh'); b=p.read_bytes(); b=b.lstrip(b'\\xef\\xbb\\xbf'); b=b.replace(b'\\r\\n', b'\\n').replace(b'\\r', b''); p.write_bytes(b)" \
    && chmod +x /app/scripts/entrypoint.sh

ENV PYTHONPATH=/app/vendor/rllm \
    GRADIO_SERVER_PORT=7860

# HF Spaces / minimal images often have uid 1000 with no /etc/passwd entry; PyTorch Inductor calls
# getpass.getuser() and crashes with KeyError. USER/LOGNAME short-circuit getuser(); cache dirs avoid $HOME issues.
ENV USER=huggingface \
    LOGNAME=huggingface \
    TORCHINDUCTOR_CACHE_DIR=/tmp/torch_inductor_cache \
    TRITON_CACHE_DIR=/tmp/triton_cache

EXPOSE 7860

# Python as PID 1: kernel never execve's entrypoint.sh directly (fixes HF "exec format error" on CRLF/BOM).
# NOTE(review): no USER directive — HF Spaces runs the container as uid 1000 regardless;
# the USER/LOGNAME env vars above cover the missing passwd entry. Confirm before adding a useradd.
CMD ["python", "/app/docker_entry.py"]