# LLaMA-Factory/docker/docker-cuda/Dockerfile
# Use the NVIDIA official image with PyTorch 2.3.0
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-02.html
FROM nvcr.io/nvidia/pytorch:24.02-py3

# Define environments
# MAX_JOBS caps parallel compile jobs when building flash-attn from source.
ENV MAX_JOBS=4
# Force flash-attn to build from source instead of downloading a prebuilt wheel.
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
# vLLM workers must use "spawn" — forking after CUDA init breaks the CUDA context.
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn

# Define installation arguments (opt-in extras, all off by default)
ARG INSTALL_BNB=false
ARG INSTALL_VLLM=false
ARG INSTALL_DEEPSPEED=false
ARG INSTALL_FLASHATTN=false
ARG PIP_INDEX=https://pypi.org/simple

# Set the working directory
WORKDIR /app

# Install the requirements first so this layer stays cached until requirements.txt changes
COPY requirements.txt /app
RUN pip config set global.index-url "$PIP_INDEX" && \
    pip config set global.extra-index-url "$PIP_INDEX" && \
    python -m pip install --upgrade pip && \
    python -m pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application into the image
COPY . /app

# Install the LLaMA Factory with the selected optional extras.
# NOTE: use POSIX "=" (not "==") inside [ ] — /bin/sh here is dash, where "=="
# raises "unexpected operator" and the branch is silently skipped, so the
# extras were never installed even when the build arg was set to true.
RUN EXTRA_PACKAGES="metrics"; \
    if [ "$INSTALL_BNB" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
    fi; \
    if [ "$INSTALL_VLLM" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
    fi; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    pip install --no-cache-dir -e ".[$EXTRA_PACKAGES]"

# Rebuild flash attention: drop the base image's transformer-engine/flash-attn
# (they can be ABI-incompatible with the torch stack installed above), then
# optionally rebuild flash-attn from source when INSTALL_FLASHATTN=true.
RUN pip uninstall -y transformer-engine flash-attn && \
    if [ "$INSTALL_FLASHATTN" = "true" ]; then \
        pip uninstall -y ninja && pip install --no-cache-dir ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Set up volumes (declared after all writes so build-time content is preserved)
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]

# Expose port 7860 for the LLaMA Board
# (key=value ENV form; the legacy space-separated form is deprecated)
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for the API service
ENV API_PORT=8000
EXPOSE 8000