Add AutoGPTQ support (basic) (#2132)

oobabooga 2023-05-17 11:12:12 -03:00 committed by GitHub
parent 10cf7831f7
commit 1a8151a2b6
3 changed files with 56 additions and 2 deletions

modules/AutoGPTQ_loader.py (new file)

@@ -0,0 +1,41 @@
import logging
from pathlib import Path

from auto_gptq import AutoGPTQForCausalLM

import modules.shared as shared
from modules.models import get_max_memory_dict


def load_quantized(model_name):
    path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
    pt_path = None
    use_safetensors = False

    # Find the model checkpoint
    found_pts = list(path_to_model.glob("*.pt"))
    found_safetensors = list(path_to_model.glob("*.safetensors"))
    if len(found_safetensors) > 0:
        if len(found_safetensors) > 1:
            logging.warning('More than one .safetensors model has been found. The last one will be selected. It could be wrong.')
        use_safetensors = True
        pt_path = found_safetensors[-1]
    elif len(found_pts) > 0:
        if len(found_pts) > 1:
            logging.warning('More than one .pt model has been found. The last one will be selected. It could be wrong.')

        pt_path = found_pts[-1]
    # Define the params for AutoGPTQForCausalLM.from_quantized
    params = {
        'model_basename': pt_path.stem,
        'device': "cuda:0" if not shared.args.cpu else "cpu",
        'use_triton': shared.args.triton,
        'use_safetensors': use_safetensors,
        'max_memory': get_max_memory_dict()
    }

    logging.warning(f"The AutoGPTQ params are: {params}")
    model = AutoGPTQForCausalLM.from_quantized(path_to_model, **params)
    return model
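
For orientation, a minimal sketch of how this loader ends up being used; the snippet is not part of the commit, the model folder name is made up, and in the webui the call actually goes through modules.models.load_model (next file) when both --wbits > 0 and --autogptq are set:

import modules.shared as shared
from modules.AutoGPTQ_loader import load_quantized

# Hypothetical values; normally these come from the command-line flags.
shared.args.model_dir = 'models'   # directory that holds the model folder
shared.args.cpu = False            # place the model on cuda:0
shared.args.triton = False         # use the default (non-triton) kernels

# Assuming models/llama-7b-4bit-128g/ contains llama-7b-4bit-128g.safetensors and no
# --gpu-memory limits are set, the params logged above would be roughly:
# {'model_basename': 'llama-7b-4bit-128g', 'device': 'cuda:0', 'use_triton': False,
#  'use_safetensors': True, 'max_memory': None}
model = load_quantized('llama-7b-4bit-128g')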

modules/models.py

@@ -72,6 +72,9 @@ def load_model(model_name):
    shared.model_type = find_model_type(model_name)
    if shared.args.wbits > 0:
        if shared.args.autogptq:
            load_func = AutoGPTQ_loader
        else:
            load_func = GPTQ_loader
    elif shared.model_type == 'llamacpp':
        load_func = llamacpp_loader
@@ -261,6 +264,12 @@ def GPTQ_loader(model_name):
    return model


def AutoGPTQ_loader(model_name):
    from modules.AutoGPTQ_loader import load_quantized

    return load_quantized(model_name)


def get_max_memory_dict():
    max_memory = {}
    if shared.args.gpu_memory:
@@ -283,7 +292,7 @@ def get_max_memory_dict():
        logging.warning(f"Auto-assigning --gpu-memory {suggestion} for your GPU to try to prevent out-of-memory errors. You can manually set other values.")
        max_memory = {0: f'{suggestion}GiB', 'cpu': f'{shared.args.cpu_memory or 99}GiB'}

-    return max_memory
+    return max_memory if len(max_memory) > 0 else None


def clear_torch_cache():
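
The last hunk changes the empty case: with no memory limits configured, get_max_memory_dict() now returns None instead of {}, so callers such as AutoGPTQForCausalLM.from_quantized receive max_memory=None. A standalone sketch of just that return-value change, using only the auto-assignment branch visible above (the 10 GiB suggestion is a made-up value):

def sketch_max_memory(suggestion=None, cpu_memory=None):
    # Mirrors only the branch shown in the hunk, not the full repo function.
    max_memory = {}
    if suggestion is not None:
        max_memory = {0: f'{suggestion}GiB', 'cpu': f'{cpu_memory or 99}GiB'}
    return max_memory if len(max_memory) > 0 else None  # previously: return max_memory

print(sketch_max_memory())               # None  (was {} before this commit)
print(sketch_max_memory(suggestion=10))  # {0: '10GiB', 'cpu': '99GiB'}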

modules/shared.py

@@ -137,6 +137,10 @@ parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
parser.add_argument('--warmup_autotune', action='store_true', help='(triton) Enable warmup autotune.')
parser.add_argument('--fused_mlp', action='store_true', help='(triton) Enable fused mlp.')

# AutoGPTQ
parser.add_argument('--autogptq', action='store_true', help='Use AutoGPTQ for loading quantized models instead of the internal GPTQ loader.')
parser.add_argument('--triton', action='store_true', help='Use triton.')

# FlexGen
parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
parser.add_argument('--percent', type=int, nargs="+", default=[0, 100, 100, 0, 100, 0], help='FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0).')
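
Taken together, the new flags are used from the command line roughly like this; the model folder name is hypothetical, and --triton only applies when the triton kernels are available:

python server.py --model llama-7b-4bit-128g --wbits 4 --autogptq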