add multi-gpu support for 4bit gptq LLaMA

deepdiffuser 2023-03-10 04:29:09 -08:00
parent 1d7e893fa1
commit ab47044459


@@ -110,6 +110,17 @@ def load_model(model_name):
             exit()
         model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
+        # Multi-GPU: split the quantized model across devices when --gpu-memory is set
+        if shared.args.gpu_memory:
+            max_memory = {}
+            for i in range(len(shared.args.gpu_memory)):
+                max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
+            max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
+            import accelerate
+            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory)
+            model = accelerate.dispatch_model(model, device_map=device_map)
+        else:
+            model = model.to(torch.device('cuda:0'))
     # Custom
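
For reference, a minimal standalone sketch of the accelerate device-map pattern this diff applies. The toy model and the memory caps below are illustrative assumptions, not part of the commit, and running it requires the CUDA devices named in the map.

```python
# Sketch of accelerate's device-map dispatch (assumptions: a toy
# nn.Sequential stands in for the quantized LLaMA; two GPUs with 1 GiB caps).
import accelerate
import torch.nn as nn

# Stand-in model: any PyTorch module tree works the same way.
model = nn.Sequential(*[nn.Linear(4096, 4096) for _ in range(40)])

# Integer keys are CUDA device indices; the 'cpu' entry catches layers
# that do not fit on the GPUs, mirroring the cpu_memory fallback above.
max_memory = {0: "1GiB", 1: "1GiB", "cpu": "8GiB"}

# infer_auto_device_map walks the module tree and assigns each submodule
# to the first device whose memory cap still has room.
device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory)

# dispatch_model moves the weights to their assigned devices and installs
# forward hooks that route activations between devices during inference.
model = accelerate.dispatch_model(model, device_map=device_map)
```

Because infer_auto_device_map fills devices in order, layers that overflow the per-GPU caps spill onto later GPUs and finally onto the CPU, which is why the diff adds a 'cpu' entry as a fallback.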