From d205ec9706414a95086f73fba9e42e5cfc8c6556 Mon Sep 17 00:00:00 2001
From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com>
Date: Tue, 16 May 2023 16:40:19 +0000
Subject: [PATCH] Fix Training fails when evaluation dataset is selected
 (#2099)

Fixes https://github.com/oobabooga/text-generation-webui/issues/2078 from Googulator
---
 modules/training.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/training.py b/modules/training.py
index 278291cc..e2410edc 100644
--- a/modules/training.py
+++ b/modules/training.py
@@ -380,10 +380,10 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
             logging_steps=5,
             evaluation_strategy="steps" if eval_data is not None else "no",
             eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
-            save_strategy="no",
+            save_strategy="steps" if eval_data is not None else "no",
             output_dir=lora_file_path,
             lr_scheduler_type=lr_scheduler_type,
-            load_best_model_at_end=True if eval_data is not None else False,
+            load_best_model_at_end=eval_data is not None,
             # TODO: Enable multi-device support
             ddp_find_unused_parameters=None,
             no_cuda=shared.args.cpu