Merge pull request #377 from askmyteapot/Fix-Multi-gpu-GPTQ-Llama-no-tokens

Update GPTQ_Loader.py
This commit is contained in:
oobabooga 2023-03-17 09:47:57 -03:00 committed by GitHub
commit 4c130679c7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -61,7 +61,7 @@ def load_quantized(model_name):
     max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
 max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
-device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
+device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
 model = accelerate.dispatch_model(model, device_map=device_map)
 # Single GPU