fix PPO trainer

hiyouga 2023-08-02 19:10:23 +08:00
parent b5ba87952a
commit 1d8a1878ea
1 changed file with 1 addition and 1 deletion


@@ -161,7 +161,7 @@ class PPOPeftTrainer(PPOTrainer, PeftTrainer):
         unwrapped_model.pretrained_model.generation_config._from_model_config = False
         queries, responses = [], []
-        query, response = inputs["input_ids"], response[:, inputs["input_ids"].size(-1):].detach().cpu()
+        query, response = inputs["input_ids"].detach().cpu(), response[:, inputs["input_ids"].size(-1):].detach().cpu()
         for i in range(len(query)):
             query_length = (query[i] != self.tokenizer.pad_token_id).nonzero()[0]
             response_length = (response[i] != self.tokenizer.pad_token_id).nonzero()[-1] + 1
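The one-line change gives the query batch the same .detach().cpu() treatment that the response batch already receives, so both tensors sit on the CPU before the per-sample loop. For context, below is a minimal standalone sketch of the padding-trimming logic shown in the hunk, using dummy tensors; pad_token_id = 0, a left-padded query, and a right-padded response are assumptions made only for this illustration (the trainer reads these from the tokenizer and the batch).

import torch

pad_token_id = 0
query = torch.tensor([[0, 0, 11, 12, 13]])      # left-padded prompt ids
response = torch.tensor([[21, 22, 23, 0, 0]])   # right-padded generated ids

for i in range(len(query)):
    # index of the first non-pad token: everything before it is left padding
    query_length = (query[i] != pad_token_id).nonzero()[0]
    # one past the index of the last non-pad token: everything after it is right padding
    response_length = (response[i] != pad_token_id).nonzero()[-1] + 1
    print(query[i][query_length:])      # tensor([11, 12, 13])
    print(response[i][:response_length])  # tensor([21, 22, 23])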