LLaMA-Factory/examples/lora_multi_gpu/multi_node.sh

#!/bin/bash
# also launch this script on the slave machine using slave_config.yaml (see the sketch below)
CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
    --config_file examples/accelerate/master_config.yaml \
    src/train.py examples/lora_multi_gpu/llama3_lora_sft.yaml
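
# A minimal sketch of the corresponding launch on the slave machine, kept
# commented out so this script only runs the master command. It assumes the
# companion config lives at examples/accelerate/slave_config.yaml (that path
# is an assumption; adjust it to wherever slave_config.yaml resides):
#
# CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \
#     --config_file examples/accelerate/slave_config.yaml \
#     src/train.py examples/lora_multi_gpu/llama3_lora_sft.yaml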