From ca293bb7135395e66fb665f1bc9ed15ed0617f03 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 13 Apr 2023 12:04:27 -0300
Subject: [PATCH] Show a warning if two quantized models are found

---
 modules/GPTQ_loader.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 34dbb2ae..96189c31 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -131,8 +131,12 @@ def load_quantized(model_name):
 
     pt_path = None
     if len(found_pts) > 0:
+        if len(found_pts) > 1:
+            print('Warning: more than one .pt model has been found. The last one will be selected. It could be wrong.')
         pt_path = found_pts[-1]
     elif len(found_safetensors) > 0:
+        if len(found_safetensors) > 1:
+            print('Warning: more than one .safetensors model has been found. The last one will be selected. It could be wrong.')
         pt_path = found_safetensors[-1]
 
     if not pt_path:
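
For context, the selection logic this patch modifies can be illustrated with a minimal standalone sketch. The function name find_quantized_checkpoint and the sorted(...).glob(...) file discovery below are assumptions made for illustration; the real load_quantized in modules/GPTQ_loader.py builds found_pts and found_safetensors from the model directory in its own way. The warning branches mirror the patched behavior.

from pathlib import Path

def find_quantized_checkpoint(model_dir):
    # Sketch only: pick a quantized checkpoint, preferring .pt over
    # .safetensors, and warn when the choice is ambiguous, mirroring
    # the logic added by this patch. The glob-based discovery here is
    # an assumption, not the repository's actual implementation.
    found_pts = sorted(Path(model_dir).glob('*.pt'))
    found_safetensors = sorted(Path(model_dir).glob('*.safetensors'))

    pt_path = None
    if len(found_pts) > 0:
        if len(found_pts) > 1:
            print('Warning: more than one .pt model has been found. The last one will be selected. It could be wrong.')
        pt_path = found_pts[-1]
    elif len(found_safetensors) > 0:
        if len(found_safetensors) > 1:
            print('Warning: more than one .safetensors model has been found. The last one will be selected. It could be wrong.')
        pt_path = found_safetensors[-1]

    return pt_path

With two .pt files in the directory, the sketch prints the .pt warning and returns the last file in sorted order, which is the ambiguity the patch is meant to surface to the user.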