From d78b04f0b44b0fd5ae6cd338a961443e33c84855 Mon Sep 17 00:00:00 2001
From: Matthew McAllister
Date: Mon, 8 May 2023 18:29:09 -0700
Subject: [PATCH] Add error message when GPTQ-for-LLaMa import fails (#1871)

---------

Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
---
 modules/GPTQ_loader.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index e9acbbd6..8142c34e 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -12,7 +12,13 @@ from transformers import AutoConfig, AutoModelForCausalLM
 import modules.shared as shared
 
 sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
-import llama_inference_offload
+
+try:
+    import llama_inference_offload
+except ImportError:
+    logging.error('Failed to load GPTQ-for-LLaMa')
+    logging.error('See https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md')
+    sys.exit(-1)
 
 try:
     from modelutils import find_layers