diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 1cd3e5cd..0329c8ba 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -66,7 +66,7 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
         from quant import autotune_warmup, make_quant_attn # triton branch
         make_quant_attn(model)
-        if shared.args.warmup_autotune:
+        if not shared.args.no_warmup_autotune:
             autotune_warmup(model)
     except ImportError: # not triton branch
         pass
diff --git a/modules/shared.py b/modules/shared.py
index 563d52bb..41ca3132 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -117,7 +117,7 @@ parser.add_argument('--wbits', type=int, default=0, help='GPTQ: Load a pre-quant
 parser.add_argument('--model_type', type=str, help='GPTQ: Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='GPTQ: Group size.')
 parser.add_argument('--pre_layer', type=int, default=0, help='GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
-parser.add_argument('--warmup_autotune', action=argparse.BooleanOptionalAction, default=True, help='GPTQ: Enable warmup autotune. Only usable for triton.')
+parser.add_argument('--no-warmup_autotune', action='store_true', help='GPTQ: Disable warmup autotune for triton.')
 
 # FlexGen
 parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
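
Note on the flag change: the diff swaps argparse.BooleanOptionalAction (default True, Python 3.9+) for a plain store_true flag, which defaults to False and works on older Python versions. Because argparse maps the hyphen in '--no-warmup_autotune' to an underscore in the attribute name, the GPTQ_loader.py check 'not shared.args.no_warmup_autotune' keeps warmup autotune enabled by default. A minimal standalone sketch of this behavior (not part of the patch; the parser below is illustrative only):

# illustrative_flag_semantics.py -- demonstrates the semantics of the new
# flag added in modules/shared.py above; this file is not in the repo.
import argparse

parser = argparse.ArgumentParser()
# store_true defaults to False, so autotune stays on unless the flag is passed.
parser.add_argument('--no-warmup_autotune', action='store_true',
                    help='GPTQ: Disable warmup autotune for triton.')

args = parser.parse_args([])                        # no flag given
assert args.no_warmup_autotune is False             # '-' in the option becomes '_' in the dest
assert not args.no_warmup_autotune                  # -> autotune_warmup(model) runs

args = parser.parse_args(['--no-warmup_autotune'])  # flag given
assert args.no_warmup_autotune is True              # -> warmup autotune is skipped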