fix inference
parent 9cb1f119a4
commit d3a0692d4d
@@ -18,10 +18,11 @@ from trl import AutoModelForCausalLMWithValueHead
 from llmtuner.extras.logging import get_logger
 from llmtuner.extras.misc import count_parameters, prepare_model_for_training
 from llmtuner.extras.save_and_load import load_valuehead_params
+from llmtuner.hparams import FinetuningArguments
 from llmtuner.tuner.core.adapter import init_adapter
 
 if TYPE_CHECKING:
-    from llmtuner.hparams import ModelArguments, FinetuningArguments
+    from llmtuner.hparams import ModelArguments
 
 
 logger = get_logger(__name__)
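Note on the import hunk: FinetuningArguments is promoted from the TYPE_CHECKING block to a real module-level import, which is necessary whenever the class is needed as a value at runtime rather than only in type annotations; ModelArguments stays annotation-only. A minimal sketch of the distinction follows — build_trainer and its body are hypothetical, only the import pattern mirrors this file:

    from typing import TYPE_CHECKING

    from llmtuner.hparams import FinetuningArguments  # real import: usable at runtime

    if TYPE_CHECKING:
        from llmtuner.hparams import ModelArguments  # type checkers only; absent at runtime

    def build_trainer(model_args: "ModelArguments") -> None:  # string annotation is fine
        # Instantiating the class requires the runtime import above (assumed
        # default-constructible here; its fields carry defaults in this repo).
        finetuning_args = FinetuningArguments()
        print(finetuning_args.finetuning_type)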
@@ -68,7 +68,7 @@ class PeftTrainer(Seq2SeqTrainer):
         else:
             torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
 
-        if self.tokenizer is not None:
+        if self.finetuning_args.finetuning_type == "full" and self.tokenizer is not None:
             self.tokenizer.save_pretrained(output_dir)
 
         with open(os.path.join(output_dir, TRAINING_ARGS_NAME), "w", encoding="utf-8") as f:
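Effect of the second hunk at save time: tokenizer files are now written next to the checkpoint only for full fine-tuning, so adapter-style outputs (e.g. LoRA) no longer receive a tokenizer copy. A simplified sketch of the resulting save path — the real method also handles value heads and PEFT adapters, which are elided here; the constants come from transformers:

    import os
    import torch
    from transformers.trainer import TRAINING_ARGS_NAME
    from transformers.utils import WEIGHTS_NAME

    def _save(self, output_dir, state_dict=None):
        # Simplified: the PeftModel branch (adapter save) is omitted.
        os.makedirs(output_dir, exist_ok=True)
        torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))

        # After this commit the tokenizer is only bundled with full fine-tuning
        # checkpoints; adapter checkpoints stay minimal.
        if self.finetuning_args.finetuning_type == "full" and self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)

        with open(os.path.join(output_dir, TRAINING_ARGS_NAME), "w", encoding="utf-8") as f:
            f.write(self.args.to_json_string() + "\n")

A likely downstream consequence (an assumption, consistent with the "fix inference" title) is that inference from an adapter checkpoint must load the tokenizer from the base model path rather than from the adapter directory.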