LLaMA-Factory/examples/full_multi_gpu/single_node.sh

#!/bin/bash
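# Full-parameter supervised fine-tuning (SFT) of Llama-2-7B on a single node
# with 4 GPUs, using DeepSpeed ZeRO-3 (ds_z3_config.json).
# Effective global batch size: per_device_train_batch_size (1)
# x gradient_accumulation_steps (2) x 4 GPUs = 8.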
deepspeed --num_gpus 4 ../../src/train_bash.py \
    --deepspeed ../deepspeed/ds_z3_config.json \
    --stage sft \
    --do_train \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --dataset alpaca_gpt4_en,glaive_toolcall \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type full \
    --output_dir ../../saves/LLaMA2-7B/full/sft \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --preprocessing_num_workers 16 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 2 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --warmup_steps 20 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 3000 \
    --val_size 0.1 \
    --ddp_timeout 180000000 \
    --plot_loss \
    --fp16
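
# A minimal usage sketch, assuming the repository layout above and four free
# GPUs (the CUDA_VISIBLE_DEVICES setting is an illustrative assumption, not
# part of the original example):
#   cd LLaMA-Factory/examples/full_multi_gpu
#   CUDA_VISIBLE_DEVICES=0,1,2,3 bash single_node.sh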