import os

import torch

from llamafactory.hparams import get_infer_args, get_train_args
from llamafactory.model import load_model, load_tokenizer


# Tiny random Llama-3 checkpoint used as a lightweight stand-in for a real model;
# override via the TINY_LLAMA environment variable to test another checkpoint.
TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
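
# Minimal arguments for an SFT full fine-tuning run; the values below mirror what
# would normally come from a CLI/YAML config and are kept small so the test only
# exercises model loading, not actual training.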
TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
    "dataset": "llamafactory/tiny-supervised-dataset",
    "dataset_dir": "ONLINE",
    "template": "llama3",
    "cutoff_len": 1024,
    "overwrite_cache": True,
    "output_dir": "dummy_dir",
    "overwrite_output_dir": True,
    "fp16": True,
}
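
# Arguments for loading the same model for inference; half precision is requested
# explicitly via infer_dtype.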
INFER_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "finetuning_type": "full",
    "template": "llama3",
    "infer_dtype": "float16",
}
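

# The tests below only check how load_model configures parameters (trainability
# and dtype) for full fine-tuning versus inference; no forward pass is executed.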
def test_full_train():
    model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
    tokenizer_module = load_tokenizer(model_args)
    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
    for param in model.parameters():
        # In full fine-tuning every parameter is trainable, and with fp16 mixed
        # precision the master weights are still kept in float32.
        assert param.requires_grad is True
        assert param.dtype == torch.float32


def test_full_inference():
    model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
    tokenizer_module = load_tokenizer(model_args)
    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
    for param in model.parameters():
        # For inference the model is frozen and loaded in the requested float16 dtype.
        assert param.requires_grad is False
        assert param.dtype == torch.float16
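

# A minimal sketch for running both checks directly; the suite itself is assumed
# to be driven by pytest, so this guard is purely a convenience.
if __name__ == "__main__":
    test_full_train()
    test_full_inference()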