From f3591ccfa130d2e5c2989b39e4bd6d051421cbff Mon Sep 17 00:00:00 2001
From: Light
Date: Wed, 12 Apr 2023 23:26:06 +0800
Subject: [PATCH 1/7] Keep minimal change.

---
 README.md              |  1 +
 modules/GPTQ_loader.py | 73 +++++++++++++++++++++++++++++-------------
 modules/shared.py      |  1 +
 3 files changed, 53 insertions(+), 22 deletions(-)

diff --git a/README.md b/README.md
index 7fed7e4e..121e0a91 100644
--- a/README.md
+++ b/README.md
@@ -238,6 +238,7 @@ Optionally, you can use the following command-line flags:
 | `--model_type MODEL_TYPE` | GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
 | `--groupsize GROUPSIZE` | GPTQ: Group size. |
 | `--pre_layer PRE_LAYER` | GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. |
+| `--warmup_autotune` | GPTQ: Enable warmup autotune. Only usable for triton. |
 
 #### FlexGen
 
diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index aa6aec7a..2d8b9b9e 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -61,6 +61,16 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
         model.load_state_dict(safe_load(checkpoint), strict=False)
     else:
         model.load_state_dict(torch.load(checkpoint), strict=False)
+
+    try:
+        from quant import autotune_warmup, make_quant_attn
+        # triton branch
+        make_quant_attn(model)
+        if shared.args.warmup_autotune:
+            autotune_warmup(model)
+    except ImportError: # not triton branch
+        pass
+
     model.seqlen = 2048
     print('Done.')
 
@@ -94,33 +104,52 @@ def load_quantized(model_name):
         print("Unknown pre-quantized model type specified. Only 'llama', 'opt' and 'gptj' are supported")
         exit()
 
-    # Now we are going to try to locate the quantized model file.
+    # Now we are going to try to locate the quantized model file. I think it's cleaner and supports the new name containing groupsize
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
-    found_pts = list(path_to_model.glob("*.pt"))
-    found_safetensors = list(path_to_model.glob("*.safetensors"))
     pt_path = None
+    priority_name_list = [
+        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit-{shared.args.groupsize}g.safetensors'),
+        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit-{shared.args.groupsize}g.pt'),
+        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit.safetensors'),
+        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit.pt'),
+        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit-{shared.args.groupsize}g.safetensors'),
+        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit-{shared.args.groupsize}g.pt'),
+        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit.safetensors'),
+        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit.pt'),
+    ]
+    for path in priority_name_list:
+        if path.exists():
+            pt_path = path
+            break
 
-    if len(found_pts) > 0:
-        pt_path = found_pts[-1]
-    elif len(found_safetensors) > 0:
-        pt_path = found_safetensors[-1]
-    else:
-        if path_to_model.name.lower().startswith('llama-7b'):
-            pt_model = f'llama-7b-{shared.args.wbits}bit'
-        elif path_to_model.name.lower().startswith('llama-13b'):
-            pt_model = f'llama-13b-{shared.args.wbits}bit'
-        elif path_to_model.name.lower().startswith('llama-30b'):
-            pt_model = f'llama-30b-{shared.args.wbits}bit'
-        elif path_to_model.name.lower().startswith('llama-65b'):
-            pt_model = f'llama-65b-{shared.args.wbits}bit'
+    # For compatibility, do we really need this?
+    if not pt_path:
+        path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
+        found_pts = list(path_to_model.glob("*.pt"))
+        found_safetensors = list(path_to_model.glob("*.safetensors"))
+        pt_path = None
+
+        if len(found_pts) > 0:
+            pt_path = found_pts[-1]
+        elif len(found_safetensors) > 0:
+            pt_path = found_safetensors[-1]
         else:
-            pt_model = f'{model_name}-{shared.args.wbits}bit'
+            if path_to_model.name.lower().startswith('llama-7b'):
+                pt_model = f'llama-7b-{shared.args.wbits}bit'
+            elif path_to_model.name.lower().startswith('llama-13b'):
+                pt_model = f'llama-13b-{shared.args.wbits}bit'
+            elif path_to_model.name.lower().startswith('llama-30b'):
+                pt_model = f'llama-30b-{shared.args.wbits}bit'
+            elif path_to_model.name.lower().startswith('llama-65b'):
+                pt_model = f'llama-65b-{shared.args.wbits}bit'
+            else:
+                pt_model = f'{model_name}-{shared.args.wbits}bit'
 
-        # Try to find the .safetensors or .pt both in the model dir and in the subfolder
-        for path in [Path(p + ext) for ext in ['.safetensors', '.pt'] for p in [f"{shared.args.model_dir}/{pt_model}", f"{path_to_model}/{pt_model}"]]:
-            if path.exists():
-                pt_path = path
-                break
+            # Try to find the .safetensors or .pt both in the model dir and in the subfolder
+            for path in [Path(p + ext) for ext in ['.safetensors', '.pt'] for p in [f"{shared.args.model_dir}/{pt_model}", f"{path_to_model}/{pt_model}"]]:
+                if path.exists():
+                    pt_path = path
+                    break
 
     if not pt_path:
         print("Could not find the quantized model in .pt or .safetensors format, exiting...")
diff --git a/modules/shared.py b/modules/shared.py
index a47a13f1..37adea65 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -115,6 +115,7 @@ parser.add_argument('--wbits', type=int, default=0, help='GPTQ: Load a pre-quant
 parser.add_argument('--model_type', type=str, help='GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='GPTQ: Group size.')
 parser.add_argument('--pre_layer', type=int, default=0, help='GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
+parser.add_argument('--warmup_autotune', action=argparse.BooleanOptionalAction, default=True, help='GPTQ: Enable warmup autotune. Only usable for triton.')
 parser.add_argument('--gptq-bits', type=int, default=0, help='DEPRECATED: use --wbits instead.')
 parser.add_argument('--gptq-model-type', type=str, help='DEPRECATED: use --model_type instead.')
 parser.add_argument('--gptq-pre-layer', type=int, default=0, help='DEPRECATED: use --pre_layer instead.')

From a405064cebefff79fe590a73e5bc6a3a24976847 Mon Sep 17 00:00:00 2001
From: Light
Date: Thu, 13 Apr 2023 01:48:17 +0800
Subject: [PATCH 2/7] Better dispatch.
---
 modules/GPTQ_loader.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 2d8b9b9e..1cd3e5cd 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -165,16 +165,19 @@ def load_quantized(model_name):
     model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, kernel_switch_threshold=threshold)
 
     # accelerate offload (doesn't work properly)
-    if shared.args.gpu_memory:
-        memory_map = list(map(lambda x: x.strip(), shared.args.gpu_memory))
-        max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
-        max_memory = {}
-        for i in range(len(memory_map)):
-            max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
-        max_memory['cpu'] = max_cpu_memory
+    if shared.args.gpu_memory or torch.cuda.device_count() > 1:
+        if shared.args.gpu_memory:
+            memory_map = list(map(lambda x: x.strip(), shared.args.gpu_memory))
+            max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
+            max_memory = {}
+            for i in range(len(memory_map)):
+                max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
+            max_memory['cpu'] = max_cpu_memory
+        else:
+            max_memory = accelerate.utils.get_balanced_memory(model)
 
         device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
-        print("Using the following device map for the 4-bit model:", device_map)
+        print("Using the following device map for the quantized model:", device_map)
         # https://huggingface.co/docs/accelerate/package_reference/big_modeling#accelerate.dispatch_model
         model = accelerate.dispatch_model(model, device_map=device_map, offload_buffers=True)
 

From cf58058c3380df057015239cce0000ef33b2b7b0 Mon Sep 17 00:00:00 2001
From: Light
Date: Thu, 13 Apr 2023 20:59:49 +0800
Subject: [PATCH 3/7] Change warmup_autotune to a negative switch.

---
 modules/GPTQ_loader.py | 2 +-
 modules/shared.py      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 1cd3e5cd..0329c8ba 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -66,7 +66,7 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
         from quant import autotune_warmup, make_quant_attn
         # triton branch
         make_quant_attn(model)
-        if shared.args.warmup_autotune:
+        if not shared.args.no_warmup_autotune:
             autotune_warmup(model)
     except ImportError: # not triton branch
         pass
diff --git a/modules/shared.py b/modules/shared.py
index 563d52bb..41ca3132 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -117,7 +117,7 @@ parser.add_argument('--wbits', type=int, default=0, help='GPTQ: Load a pre-quant
 parser.add_argument('--model_type', type=str, help='GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='GPTQ: Group size.')
 parser.add_argument('--pre_layer', type=int, default=0, help='GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
-parser.add_argument('--warmup_autotune', action=argparse.BooleanOptionalAction, default=True, help='GPTQ: Enable warmup autotune. Only usable for triton.')
+parser.add_argument('--no-warmup_autotune', action='store_true', help='GPTQ: Disable warmup autotune for triton.')
 
 # FlexGen
 parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')

From 97e67d136b0df4e977c0f5db6efd4e674c360c7f Mon Sep 17 00:00:00 2001
From: Light
Date: Thu, 13 Apr 2023 21:00:58 +0800
Subject: [PATCH 4/7] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 45379d67..382f4e27 100644
--- a/README.md
+++ b/README.md
@@ -239,7 +239,7 @@ Optionally, you can use the following command-line flags:
 | `--model_type MODEL_TYPE` | GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
 | `--groupsize GROUPSIZE` | GPTQ: Group size. |
 | `--pre_layer PRE_LAYER` | GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. |
-| `--warmup_autotune` | GPTQ: Enable warmup autotune. Only usable for triton. |
+| `--no-warmup_autotune` | GPTQ: Disable warmup autotune for triton. |
 
 #### FlexGen
 

From da74cd7c44965cdea1d3f250d3083dde56f13f22 Mon Sep 17 00:00:00 2001
From: Light
Date: Thu, 13 Apr 2023 21:43:32 +0800
Subject: [PATCH 5/7] Generalized weight search path.

---
 modules/GPTQ_loader.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 0329c8ba..5ef6003e 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -108,14 +108,10 @@ def load_quantized(model_name):
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
     pt_path = None
     priority_name_list = [
-        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit-{shared.args.groupsize}g.safetensors'),
-        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit-{shared.args.groupsize}g.pt'),
-        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit.safetensors'),
-        Path(f'{shared.args.model_dir}/{model_name}/{shared.args.wbits}bit.pt'),
-        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit-{shared.args.groupsize}g.safetensors'),
-        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit-{shared.args.groupsize}g.pt'),
-        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit.safetensors'),
-        Path(f'{shared.args.model_dir}/{model_name}-{shared.args.wbits}bit.pt'),
+        Path(f'{shared.args.model_dir}/{model_name}{hyphen}{shared.args.wbits}bit{group}{ext}')
+        for ext in ['.safetensors', '.pt']
+        for group in ([f'-{shared.args.groupsize}g', ''] if shared.args.groupsize > 0 else [''])
+        for hyphen in ['-', f'/{model_name}-', '/']
     ]
     for path in priority_name_list:
         if path.exists():
             pt_path = path
             break

From f2bf1a2c9e0074ea7cb98e6d9176940998ba0559 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 13 Apr 2023 11:17:32 -0300
Subject: [PATCH 6/7] Add some comments, remove obsolete code

---
 modules/GPTQ_loader.py | 26 ++++++--------------------
 1 file changed, 6 insertions(+), 20 deletions(-)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 5ef6003e..fc1689b8 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -78,8 +78,9 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
 
 
 def load_quantized(model_name):
+
+    # Find the model type
     if not shared.args.model_type:
-        # Try to determine model type from model name
         name = model_name.lower()
         if any((k in name for k in ['llama', 'alpaca', 'vicuna'])):
             model_type = 'llama'
@@ -94,6 +95,7 @@ def load_quantized(model_name):
     else:
         model_type = shared.args.model_type.lower()
 
+    # Select the appropriate load_quant function
     if shared.args.pre_layer and model_type == 'llama':
         load_quant = llama_inference_offload.load_quant
     elif model_type in ('llama', 'opt', 'gptj'):
@@ -104,7 +106,7 @@ def load_quantized(model_name):
         print("Unknown pre-quantized model type specified. Only 'llama', 'opt' and 'gptj' are supported")
         exit()
 
-    # Now we are going to try to locate the quantized model file. I think it's cleaner and supports the new name containing groupsize
+    # Locate the quantized model file
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
     pt_path = None
     priority_name_list = [
@@ -118,7 +120,8 @@ def load_quantized(model_name):
             pt_path = path
             break
 
-    # For compatibility, do we really need this?
+    # If the model hasn't been found with a well-behaved name, pick the last .pt
+    # or the last .safetensors found in its folder as a last resort
     if not pt_path:
         path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
         found_pts = list(path_to_model.glob("*.pt"))
         found_safetensors = list(path_to_model.glob("*.safetensors"))
@@ -129,23 +132,6 @@ def load_quantized(model_name):
             pt_path = found_pts[-1]
         elif len(found_safetensors) > 0:
             pt_path = found_safetensors[-1]
-        else:
-            if path_to_model.name.lower().startswith('llama-7b'):
-                pt_model = f'llama-7b-{shared.args.wbits}bit'
-            elif path_to_model.name.lower().startswith('llama-13b'):
-                pt_model = f'llama-13b-{shared.args.wbits}bit'
-            elif path_to_model.name.lower().startswith('llama-30b'):
-                pt_model = f'llama-30b-{shared.args.wbits}bit'
-            elif path_to_model.name.lower().startswith('llama-65b'):
-                pt_model = f'llama-65b-{shared.args.wbits}bit'
-            else:
-                pt_model = f'{model_name}-{shared.args.wbits}bit'
-
-            # Try to find the .safetensors or .pt both in the model dir and in the subfolder
-            for path in [Path(p + ext) for ext in ['.safetensors', '.pt'] for p in [f"{shared.args.model_dir}/{pt_model}", f"{path_to_model}/{pt_model}"]]:
-                if path.exists():
-                    pt_path = path
-                    break
 
     if not pt_path:
         print("Could not find the quantized model in .pt or .safetensors format, exiting...")

From fde6d06167322eecf8e5b3a6bfc5cc381e982efa Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 13 Apr 2023 11:27:03 -0300
Subject: [PATCH 7/7] Prioritize names with the groupsize in them

---
 modules/GPTQ_loader.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index fc1689b8..34dbb2ae 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -115,6 +115,8 @@ def load_quantized(model_name):
         for group in ([f'-{shared.args.groupsize}g', ''] if shared.args.groupsize > 0 else [''])
         for hyphen in ['-', f'/{model_name}-', '/']
     ]
+    if shared.args.groupsize > 0:
+        priority_name_list = [i for i in priority_name_list if str(shared.args.groupsize) in i.name] + [i for i in priority_name_list if str(shared.args.groupsize) not in i.name]
     for path in priority_name_list:
         if path.exists():
             pt_path = path
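
For reference, the candidate search that patches 5 and 7 arrive at can be exercised on its own. The sketch below is not part of the patch series; `model_dir`, `model_name`, `wbits` and `groupsize` are stand-ins for the corresponding `shared.args` values, and the printed order shows why a groupsize-suffixed `.safetensors` file is tried first when a group size is set.

```python
from pathlib import Path

# Stand-in values (assumed for illustration only).
model_dir, model_name = 'models', 'llama-7b'
wbits, groupsize = 4, 128

# Same comprehension as patch 5: every combination of location, bit width,
# optional group size suffix and extension.
priority_name_list = [
    Path(f'{model_dir}/{model_name}{hyphen}{wbits}bit{group}{ext}')
    for ext in ['.safetensors', '.pt']
    for group in ([f'-{groupsize}g', ''] if groupsize > 0 else [''])
    for hyphen in ['-', f'/{model_name}-', '/']
]

# Patch 7: when a group size is given, names that mention it come first.
if groupsize > 0:
    priority_name_list = (
        [p for p in priority_name_list if str(groupsize) in p.name]
        + [p for p in priority_name_list if str(groupsize) not in p.name]
    )

for p in priority_name_list:
    print(p)
```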
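
Patches 3 and 4 flip the option into a negative switch: warmup autotune stays on by default and `--no-warmup_autotune` turns it off. A minimal sketch of the resulting behaviour, detached from `modules/shared.py`:

```python
import argparse

# Minimal sketch of the flag's final shape (patches 3/4): a plain negative
# switch, so warmup autotune is enabled unless the user opts out.
parser = argparse.ArgumentParser()
parser.add_argument('--no-warmup_autotune', action='store_true',
                    help='GPTQ: Disable warmup autotune for triton.')

for argv in ([], ['--no-warmup_autotune']):
    args = parser.parse_args(argv)
    # Mirrors the check in GPTQ_loader.py: call autotune_warmup() unless disabled.
    print(argv or '(defaults)', '-> warmup autotune enabled:', not args.no_warmup_autotune)
```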
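
Patch 2 widens the accelerate offload path: with more than one visible GPU and no explicit `shared.args.gpu_memory`, the loader now asks accelerate for a balanced memory map instead of skipping dispatch. A rough sketch of that call sequence, assuming a multi-GPU machine and using a toy module in place of the quantized model:

```python
import accelerate
from torch import nn

# Toy stand-in for the quantized model (assumption: the real code dispatches
# the GPTQ-loaded model instead).
model = nn.Sequential(*[nn.Linear(2048, 2048) for _ in range(8)])

# Same calls as the patch: balance across the visible GPUs, then dispatch.
max_memory = accelerate.utils.get_balanced_memory(model)
device_map = accelerate.infer_auto_device_map(
    model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
model = accelerate.dispatch_model(model, device_map=device_map, offload_buffers=True)
print(device_map)
```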