Fix: training fails when an evaluation dataset is selected (#2099)

Fixes https://github.com/oobabooga/text-generation-webui/issues/2078 from Googulator
This commit is contained in:
Forkoz 2023-05-16 16:40:19 +00:00 committed by GitHub
parent 428261eede
commit d205ec9706
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -380,10 +380,10 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
     logging_steps=5,
     evaluation_strategy="steps" if eval_data is not None else "no",
     eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
-    save_strategy="no",
+    save_strategy="steps" if eval_data is not None else "no",
     output_dir=lora_file_path,
     lr_scheduler_type=lr_scheduler_type,
-    load_best_model_at_end=True if eval_data is not None else False,
+    load_best_model_at_end=eval_data is not None,
     # TODO: Enable multi-device support
     ddp_find_unused_parameters=None,
     no_cuda=shared.args.cpu