#!/bin/bash
# PPO (RLHF) fine-tuning of Llama-2-7B using LoRA, pinned to GPU 0.
#
# Prerequisites (paths are relative to this script's directory):
#   - SFT LoRA adapter at   ../../saves/LLaMA2-7B/lora/sft    (starting policy)
#   - reward model at       ../../saves/LLaMA2-7B/lora/reward (scores rollouts)
# Output: a new PPO-trained LoRA adapter in ../../saves/LLaMA2-7B/lora/ppo
#
# NOTE(review): the command previously shared the shebang line, so the script
# body was never executed; it has been restored to one flag per line.

CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
    --stage ppo \
    --do_train \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \
    --create_new_adapter \
    --dataset alpaca_gpt4_en \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --reward_model ../../saves/LLaMA2-7B/lora/reward \
    --output_dir ../../saves/LLaMA2-7B/lora/ppo \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 512 \
    --preprocessing_num_workers 16 \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --save_steps 100 \
    --learning_rate 1e-5 \
    --num_train_epochs 1.0 \
    --max_samples 1000 \
    --top_k 0 \
    --top_p 0.9 \
    --max_new_tokens 256 \
    --plot_loss \
    --fp16