From 372363bc3d5383d8351e45ee77323ba686a59769 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 12:07:02 -0300
Subject: [PATCH] Fix GPTQ load_quant call on Windows

---
 modules/quantized_LLaMA.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py
index e9352f90..a5757c68 100644
--- a/modules/quantized_LLaMA.py
+++ b/modules/quantized_LLaMA.py
@@ -40,7 +40,7 @@ def load_quantized_LLaMA(model_name):
         print(f"Could not find {pt_model}, exiting...")
         exit()
 
-    model = load_quant(path_to_model, str(pt_path), bits)
+    model = load_quant(str(path_to_model), str(pt_path), bits)
 
     # Multiple GPUs or GPU+CPU
     if shared.args.gpu_memory:
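
For context, a minimal sketch of the likely failure mode this patch guards against (an assumption: the `load_quant` body below is illustrative, not the actual GPTQ-for-LLaMa implementation; the point is only that code performing str-only operations on its path argument breaks when handed a `pathlib.Path`):

```python
from pathlib import Path

def load_quant(model, checkpoint, wbits):
    # Hypothetical stand-in for GPTQ's load_quant: str-only operations
    # like .endswith() raise AttributeError when `model` is a Path object.
    if model.endswith("/"):
        model = model[:-1]
    print(f"loading {wbits}-bit weights for {model} from {checkpoint}")

path_to_model = Path("models") / "llama-7b"
pt_path = path_to_model.with_suffix(".pt")

# load_quant(path_to_model, str(pt_path), 4)   # AttributeError: Path has
#                                              # no .endswith (reported on Windows)
load_quant(str(path_to_model), str(pt_path), 4)  # converting to str works
```

Wrapping both arguments in `str()` at the call site, as the patch does, keeps the fix local to `load_quantized_LLaMA` rather than requiring changes inside the external `load_quant` function.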