Add --checkpoint argument for GPTQ

oobabooga 2023-05-04 15:17:20 -03:00
parent dbddedca3f
commit b6ff138084
3 changed files with 8 additions and 3 deletions

README.md

@@ -233,10 +233,11 @@ Optionally, you can use the following command-line flags:
| `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
| `--groupsize GROUPSIZE` | Group size. |
| `--pre_layer PRE_LAYER` | The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. |
| `--checkpoint CHECKPOINT` | The path to the quantized checkpoint file. If not specified, it will be automatically detected. |
| `--monkey-patch` | Apply the monkey patch for using LoRAs with quantized models. |
| `--quant_attn` | (triton) Enable quant attention. |
| `--warmup_autotune` | (triton) Enable warmup autotune. |
| `--fused_mlp` | (triton) Enable fused mlp. |

#### FlexGen

modules/GPTQ_loader.py

@@ -95,6 +95,9 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
# Used to locate the .pt/.safetensors quantized file
def find_quantized_model_file(model_name):
    if shared.args.checkpoint:
        return Path(shared.args.checkpoint)

    path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
    pt_path = None
    priority_name_list = [
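The change above is an early return: when `--checkpoint` is supplied, its path is used verbatim and the automatic `.pt`/`.safetensors` search is skipped. The sketch below illustrates that precedence in a self-contained form; the glob-based fallback is a simplification for illustration, not the repository's actual `priority_name_list` logic, and the function name is hypothetical.

```python
from pathlib import Path

def find_quantized_model_file_sketch(model_name, checkpoint=None, model_dir='models'):
    """Illustrative sketch of the lookup order introduced by this commit."""
    # New behavior: an explicit --checkpoint path bypasses detection entirely.
    if checkpoint:
        return Path(checkpoint)

    # Simplified fallback (assumption): take the first .safetensors or .pt file
    # in the model folder. The real code ranks candidates via priority_name_list.
    path_to_model = Path(f'{model_dir}/{model_name}')
    for pattern in ('*.safetensors', '*.pt'):
        matches = sorted(path_to_model.glob(pattern))
        if matches:
            return matches[0]
    return None
```

In practice this lets a quantized file with an arbitrary name be loaded without renaming it to match the detection patterns.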

modules/shared.py

@@ -131,6 +131,7 @@ parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized m
parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
parser.add_argument('--pre_layer', type=int, default=0, help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
parser.add_argument('--warmup_autotune', action='store_true', help='(triton) Enable warmup autotune.')
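The new argparse entry is a plain optional string, so `shared.args.checkpoint` is `None` unless the flag is passed, which is what makes the truthiness check in `GPTQ_loader.py` work. A reduced, standalone stand-in for the parser (only the flags relevant here, with an illustrative path) behaves like this:

```python
import argparse
from pathlib import Path

# Reduced stand-in for the parser in modules/shared.py; only the relevant flags.
parser = argparse.ArgumentParser()
parser.add_argument('--pre_layer', type=int, default=0,
                    help='The number of layers to allocate to the GPU.')
parser.add_argument('--checkpoint', type=str,
                    help='The path to the quantized checkpoint file. '
                         'If not specified, it will be automatically detected.')

# Illustrative invocation; the path is hypothetical.
args = parser.parse_args(['--checkpoint', 'models/llama-7b-4bit/model.safetensors'])

# checkpoint defaults to None when omitted, so `if args.checkpoint:` mirrors
# the `if shared.args.checkpoint:` check added to find_quantized_model_file().
pt_path = Path(args.checkpoint) if args.checkpoint else None
print(pt_path)  # models/llama-7b-4bit/model.safetensors
```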