text-generation-webui/modules/GPTQ_loader.py


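"""Loader for GPTQ pre-quantized LLaMA and OPT models, using code from the GPTQ-for-LLaMa repository."""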
import re
import sys
from pathlib import Path
import accelerate
import torch
import modules.shared as shared
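# GPTQ-for-LLaMa is expected to be cloned into repositories/ (it is not an
# installed package), so its directory is put on sys.path before importing.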
sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
import llama
import llama_inference_offload
import opt


def load_quantized(model_name):
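    """Load a GPTQ pre-quantized model (llama or opt) from models/ and return it."""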
    if not shared.args.gptq_model_type:
        # Try to determine model type from model name
        model_type = model_name.split('-')[0].lower()
        if model_type not in ('llama', 'opt'):
            print("Can't determine model type from model name. Please specify it manually using --gptq-model-type "
                  "argument")
            exit()
    else:
        model_type = shared.args.gptq_model_type.lower()
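    # Pick the checkpoint loader; llama_inference_offload is the CPU-offloading variant used when gptq_pre_layer is set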
    if model_type == 'llama':
        if not shared.args.gptq_pre_layer:
            load_quant = llama.load_quant
        else:
            load_quant = llama_inference_offload.load_quant
    elif model_type == 'opt':
        load_quant = opt.load_quant
    else:
        print("Unknown pre-quantized model type specified. Only 'llama' and 'opt' are supported")
        exit()
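    # Build the expected .pt checkpoint filename from the model size and gptq_bits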
    path_to_model = Path(f'models/{model_name}')
    if path_to_model.name.lower().startswith('llama-7b'):
        pt_model = f'llama-7b-{shared.args.gptq_bits}bit.pt'
    elif path_to_model.name.lower().startswith('llama-13b'):
        pt_model = f'llama-13b-{shared.args.gptq_bits}bit.pt'
    elif path_to_model.name.lower().startswith('llama-30b'):
        pt_model = f'llama-30b-{shared.args.gptq_bits}bit.pt'
    elif path_to_model.name.lower().startswith('llama-65b'):
        pt_model = f'llama-65b-{shared.args.gptq_bits}bit.pt'
    else:
        pt_model = f'{model_name}-{shared.args.gptq_bits}bit.pt'
    # Try to find the .pt both in models/ and in the subfolder
    pt_path = None
    for path in [Path(p) for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
        if path.exists():
            pt_path = path

    if not pt_path:
        print(f"Could not find {pt_model}, exiting...")
        exit()
    # qwopqwop200's offload
    if shared.args.gptq_pre_layer:
        model = load_quant(str(path_to_model), str(pt_path), shared.args.gptq_bits, shared.args.gptq_pre_layer)
    else:
        model = load_quant(str(path_to_model), str(pt_path), shared.args.gptq_bits)
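        # If gpu_memory limits were given, build accelerate's max_memory dict from them (bare numbers are treated as GiB)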
        # accelerate offload (doesn't work properly)
        if shared.args.gpu_memory:
            memory_map = list(map(lambda x: x.strip(), shared.args.gpu_memory))
            max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
            max_memory = {}
            for i in range(len(memory_map)):
                max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
            max_memory['cpu'] = max_cpu_memory
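            # Let accelerate infer a device map within these limits, keeping each LlamaDecoderLayer on a single device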
            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
            print("Using the following device map for the 4-bit model:", device_map)
            # https://huggingface.co/docs/accelerate/package_reference/big_modeling#accelerate.dispatch_model
            model = accelerate.dispatch_model(model, device_map=device_map, offload_buffers=True)
        # No offload
        elif not shared.args.cpu:
            model = model.to(torch.device('cuda:0'))
    return model