diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 2d8ef612..296077c6 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -185,7 +185,8 @@ def load_quantized(model_name):
             max_memory = {}
             for i in range(len(memory_map)):
                 max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
-            max_memory['cpu'] = max_cpu_memory
+
+            max_memory['cpu'] = f'{max_cpu_memory}GiB' if not re.match('.*ib$', max_cpu_memory.lower()) else max_cpu_memory
         else:
             max_memory = accelerate.utils.get_balanced_memory(model)
diff --git a/modules/models.py b/modules/models.py
index 5b838222..e1917a4b 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -189,7 +189,7 @@ def load_model(model_name):
             for i in range(len(memory_map)):
                 max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
-            max_memory['cpu'] = max_cpu_memory
+            max_memory['cpu'] = f'{max_cpu_memory}GiB' if not re.match('.*ib$', max_cpu_memory.lower()) else max_cpu_memory
             params['max_memory'] = max_memory
         elif shared.args.auto_devices:
             total_mem = (torch.cuda.get_device_properties(0).total_memory / (1024 * 1024))
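
Note on the change: both hunks run the CPU entry of max_memory through the same unit normalization already applied to the per-GPU entries, so a bare number such as 99 becomes '99GiB', while a value that already ends in a binary suffix (for example '8000MiB') is passed through unchanged. Below is a minimal standalone sketch of that normalization; the helper name and the sample values are illustrative and not part of the patch.

import re

def normalize_memory_value(value):
    # Hypothetical helper mirroring the expression used in the patch:
    # append 'GiB' when the value has no binary unit suffix, and pass
    # values that already end in 'ib' (GiB, MiB, KiB) through as-is.
    return f'{value}GiB' if not re.match('.*ib$', value.lower()) else value

# Illustrative inputs (not taken from the PR):
print(normalize_memory_value('99'))       # '99GiB'
print(normalize_memory_value('8000MiB'))  # '8000MiB'
print(normalize_memory_value('16GiB'))    # '16GiB'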