diff --git a/README.md b/README.md
index e1b45236..4b42edd7 100644
--- a/README.md
+++ b/README.md
@@ -448,7 +448,8 @@ docker build -f ./docker/docker-cuda/Dockerfile \
     -t llamafactory:latest .
 
 docker run -dit --gpus=all \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -p 7860:7860 \
@@ -471,7 +472,8 @@ docker build -f ./docker/docker-npu/Dockerfile \
 
 # Change `device` upon your resources
 docker run -dit \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -v /usr/local/dcmi:/usr/local/dcmi \
diff --git a/README_zh.md b/README_zh.md
index 32edb1f7..3926c09d 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -448,7 +448,8 @@ docker build -f ./docker/docker-cuda/Dockerfile \
     -t llamafactory:latest .
 
 docker run -dit --gpus=all \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -p 7860:7860 \
@@ -471,7 +472,8 @@ docker build -f ./docker/docker-npu/Dockerfile \
 
 # Change `device` upon your resources
 docker run -dit \
-    -v ./hf_cache:/root/.cache/huggingface/ \
+    -v ./hf_cache:/root/.cache/huggingface \
+    -v ./ms_cache:/root/.cache/modelscope \
     -v ./data:/app/data \
     -v ./output:/app/output \
     -v /usr/local/dcmi:/usr/local/dcmi \
diff --git a/docker/docker-cuda/Dockerfile b/docker/docker-cuda/Dockerfile
index 2d20bfe4..827b7b3c 100644
--- a/docker/docker-cuda/Dockerfile
+++ b/docker/docker-cuda/Dockerfile
@@ -36,7 +36,7 @@ RUN EXTRA_PACKAGES="metrics"; \
     pip uninstall -y transformer-engine flash-attn
 
 # Set up volumes
-VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
 
 # Expose port 7860 for the LLaMA Board
 ENV GRADIO_SERVER_PORT 7860
diff --git a/docker/docker-cuda/docker-compose.yml b/docker/docker-cuda/docker-compose.yml
index 04d6531a..1c0a3c75 100644
--- a/docker/docker-cuda/docker-compose.yml
+++ b/docker/docker-cuda/docker-compose.yml
@@ -10,7 +10,8 @@ services:
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
-      - ./hf_cache:/root/.cache/huggingface/
+      - ./hf_cache:/root/.cache/huggingface
+      - ./ms_cache:/root/.cache/modelscope
       - ./data:/app/data
       - ./output:/app/output
     ports:
diff --git a/docker/docker-npu/Dockerfile b/docker/docker-npu/Dockerfile
index 0fdd4472..08de626b 100644
--- a/docker/docker-npu/Dockerfile
+++ b/docker/docker-npu/Dockerfile
@@ -30,7 +30,7 @@ RUN EXTRA_PACKAGES="torch-npu,metrics"; \
     pip uninstall -y transformer-engine flash-attn
 
 # Set up volumes
-VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
+VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
 
 # Expose port 7860 for the LLaMA Board
 ENV GRADIO_SERVER_PORT 7860
diff --git a/docker/docker-npu/docker-compose.yml b/docker/docker-npu/docker-compose.yml
index 7fff6e73..a6b878fd 100644
--- a/docker/docker-npu/docker-compose.yml
+++ b/docker/docker-npu/docker-compose.yml
@@ -8,7 +8,8 @@ services:
         PIP_INDEX: https://pypi.org/simple
     container_name: llamafactory
     volumes:
-      - ./hf_cache:/root/.cache/huggingface/
+      - ./hf_cache:/root/.cache/huggingface
+      - ./ms_cache:/root/.cache/modelscope
       - ./data:/app/data
       - ./output:/app/output
       - /usr/local/dcmi:/usr/local/dcmi
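
The new `ms_cache` mount only receives files when LLaMA-Factory is told to pull models from ModelScope rather than the Hugging Face Hub. A minimal usage sketch, assuming the `llamafactory:latest` image built per the README and the `USE_MODELSCOPE_HUB=1` switch that LLaMA-Factory reads to route downloads to ModelScope (the trailing `--name` and image arguments are elided in the hunks above and are assumed here):

# Sketch: run the CUDA image with the ModelScope cache mounted and enabled.
# USE_MODELSCOPE_HUB=1 routes model downloads to ModelScope, so they land
# in /root/.cache/modelscope inside the container and persist in ./ms_cache
# on the host across container restarts.
docker run -dit --gpus=all \
    -e USE_MODELSCOPE_HUB=1 \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./ms_cache:/root/.cache/modelscope \
    -v ./data:/app/data \
    -v ./output:/app/output \
    -p 7860:7860 \
    --name llamafactory \
    llamafactory:latest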