LLaMA-Factory/examples/full_multi_gpu/multi_node.sh

16 lines
343 B
Bash
Raw Normal View History

2024-02-28 15:19:25 +00:00
#!/bin/bash
# Launch multi-node, full-parameter SFT for LLaMA-Factory via torchrun.
# Run this script once on EVERY node; set RANK=0 on the master node and
# RANK=1..NNODES-1 on each worker. All nodes must reach MASTER_ADDR:MASTER_PORT.

set -euo pipefail

NPROC_PER_NODE=4          # GPUs used per node; must match CUDA_VISIBLE_DEVICES below
NNODES=2                  # total number of participating nodes
RANK=0                    # this node's rank (0 = master; edit per node)
MASTER_ADDR=192.168.0.1   # IP of the rank-0 node, reachable from all workers
MASTER_PORT=29500         # free TCP port on the master used for rendezvous

CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun \
  --nproc_per_node "$NPROC_PER_NODE" \
  --nnodes "$NNODES" \
  --node_rank "$RANK" \
  --master_addr "$MASTER_ADDR" \
  --master_port "$MASTER_PORT" \
  src/train.py examples/full_multi_gpu/llama3_full_sft.yaml