use updated load_quantized

Ayanami Rei 2023-03-13 22:11:40 +03:00
parent a6a6522b6a
commit 8778b756e6


@@ -91,7 +91,7 @@ def load_model(model_name):
     elif shared.args.gptq_bits > 0:
         from modules.quant_loader import load_quantized
-        model = load_quantized(model_name, shared.args.gptq_model_type.lower())
+        model = load_quantized(model_name)
     # Custom
     else:
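
The call site drops the second argument, so presumably the updated load_quantized now resolves the GPTQ model type from shared.args on its own. A minimal sketch of that assumption (the body is a placeholder, not the repo's actual loader code):

from modules import shared  # assumed import, mirroring the diff's call site

def load_quantized(model_name):
    # Assumption: the loader reads the model type internally now,
    # which is why the caller no longer passes it.
    model_type = shared.args.gptq_model_type.lower()
    # ... actual GPTQ weight loading would happen here ...
    print(f"Loading {model_name} as GPTQ model type {model_type}")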