# syntax=docker/dockerfile:1

# Base: NVIDIA NGC PyTorch container (Ubuntu-based; CUDA, cuDNN and PyTorch
# preinstalled), pinned to a specific monthly tag for reproducibility.
FROM nvcr.io/nvidia/pytorch:24.02-py3

# Build/runtime tuning, grouped into one ENV instruction:
#   MAX_JOBS                     - caps parallel compile jobs (relevant to the
#                                  flash-attn source build below; keeps memory use bounded)
#   FLASH_ATTENTION_FORCE_BUILD  - forces flash-attn to build from source rather
#                                  than use a prebuilt wheel
#   VLLM_WORKER_MULTIPROC_METHOD - "spawn" start method for vLLM worker processes
#                                  (NOTE(review): presumably to avoid fork-after-CUDA-init
#                                  issues — confirm against vLLM docs)
ENV MAX_JOBS=4 \
    FLASH_ATTENTION_FORCE_BUILD=TRUE \
    VLLM_WORKER_MULTIPROC_METHOD=spawn

# Optional feature toggles; enable at build time, e.g.:
#   docker build --build-arg INSTALL_VLLM=true .
# All default to "false" so the default image stays lean.
ARG INSTALL_BNB=false
ARG INSTALL_VLLM=false
ARG INSTALL_DEEPSPEED=false
ARG INSTALL_FLASHATTN=false
ARG INSTALL_LIGER_KERNEL=false
ARG INSTALL_HQQ=false
ARG INSTALL_EETQ=false
# Pip index override (e.g. a regional or corporate PyPI mirror).
ARG PIP_INDEX=https://pypi.org/simple

# All subsequent relative paths resolve under /app (created automatically).
WORKDIR /app

# Install Python dependencies first, in their own layer, so it is cached
# until requirements.txt itself changes (source edits below won't bust it).
# --no-cache-dir keeps pip's download cache out of the image layer (DL3042).
COPY requirements.txt /app
RUN pip config set global.index-url "$PIP_INDEX" && \
    pip config set global.extra-index-url "$PIP_INDEX" && \
    python -m pip install --upgrade pip && \
    python -m pip install --no-cache-dir -r requirements.txt

# Copy the application source. Keep a .dockerignore (.git, caches, local
# outputs) so the build context stays small and this layer isn't busted
# by irrelevant files.
COPY . /app

# Assemble the project "extras" list from the build args and install the
# package in editable mode.
# NOTE: RUN uses /bin/sh, which is dash on Ubuntu-based images; dash's `[`
# builtin does not support bash's `==` (it errors with "unexpected operator",
# so the toggles would silently never fire). POSIX `=` is used instead.
RUN EXTRA_PACKAGES="metrics"; \
    if [ "$INSTALL_BNB" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
    fi; \
    if [ "$INSTALL_VLLM" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
    fi; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    if [ "$INSTALL_LIGER_KERNEL" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
    fi; \
    if [ "$INSTALL_HQQ" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
    fi; \
    if [ "$INSTALL_EETQ" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},eetq"; \
    fi; \
    pip install -e ".[$EXTRA_PACKAGES]"

# Remove the transformer-engine / flash-attn builds shipped in the NGC image
# (NOTE(review): presumably they conflict with the pinned requirements —
# confirm), then optionally rebuild flash-attn from source.
# POSIX `=` (not bash's `==`) because /bin/sh is dash here; dash's `[` would
# reject `==` and the branch would never run.
RUN pip uninstall -y transformer-engine flash-attn && \
    if [ "$INSTALL_FLASHATTN" = "true" ]; then \
        pip uninstall -y ninja && pip install ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Persist model caches (Hugging Face / ModelScope) and user data/output
# across containers. Declared after all build-time writes to these paths,
# so nothing written during the build is discarded.
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]

# Web UI (Gradio) port. key=value ENV form (the space-separated form is
# deprecated). EXPOSE is documentation only — publish with `-p 7860:7860`.
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# API server port. key=value ENV form (the space-separated form is
# deprecated). Publish with `-p 8000:8000`.
ENV API_PORT=8000
EXPOSE 8000