Mirror of https://github.com/oobabooga/text-generation-webui.git
rename method
This commit is contained in:
parent 1b99ed61bc
commit 3c9afd5ca3
@@ -89,9 +89,9 @@ def load_model(model_name):
 
     # Quantized model
     elif shared.args.gptq_bits > 0:
-        from modules.quant_loader import load_quant
+        from modules.quant_loader import load_quantized
 
-        model = load_quant(model_name, shared.args.gptq_model_type)
+        model = load_quantized(model_name, shared.args.gptq_model_type)
 
     # Custom
     else:
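For context, the branch changed above sits inside load_model's dispatch over model types: a positive shared.args.gptq_bits value now routes through the renamed loader. A condensed sketch of the caller side after this commit, with the other loading paths reduced to a placeholder (an illustration only, not the full function):

from modules import shared


def load_model(model_name):
    # Quantized model: a positive gptq_bits setting selects the GPTQ path.
    if shared.args.gptq_bits > 0:
        from modules.quant_loader import load_quantized

        model = load_quantized(model_name, shared.args.gptq_model_type)
    # The default and custom loading paths appear only as context in the diff
    # above, so they are stubbed out here.
    else:
        raise NotImplementedError("non-GPTQ loading paths omitted from this sketch")
    return model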
@@ -10,7 +10,7 @@ sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
 
 
 # 4-bit LLaMA
-def load_quant(model_name, model_type):
+def load_quantized(model_name, model_type):
     if model_type == 'llama':
         from llama import load_quant
     elif model_type == 'opt':
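The unchanged context line from llama import load_quant hints at the motivation for the rename: the wrapper itself imports a function called load_quant from GPTQ-for-LLaMa, so giving the wrapper the distinct name load_quantized removes the name collision. A minimal sketch of what the renamed dispatcher plausibly looks like after this commit; everything beyond the visible context lines (the opt branch's import, the error handling, and the exact arguments passed to GPTQ's load_quant) is assumed for illustration:

import sys
from pathlib import Path

# Make the vendored GPTQ-for-LLaMa checkout importable.
sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))


# 4-bit LLaMA
def load_quantized(model_name, model_type):
    # Pick the per-family GPTQ loader; the old wrapper name load_quant
    # shadowed exactly the function being imported here.
    if model_type == 'llama':
        from llama import load_quant
    elif model_type == 'opt':
        from opt import load_quant  # assumed counterpart for OPT models
    else:
        raise ValueError(f"Unsupported GPTQ model type: {model_type}")

    # Assumed call shape: model path plus a 4-bit checkpoint; the real module
    # derives these from shared.args and the models/ directory.
    path_to_model = Path(f"models/{model_name}")
    checkpoint = Path(f"models/{model_name}-4bit.pt")
    model = load_quant(str(path_to_model), str(checkpoint), 4)
    return model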