Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-12-25 05:48:55 +01:00)
Add --checkpoint argument for GPTQ
commit b6ff138084 (parent dbddedca3f)
@@ -233,10 +233,11 @@ Optionally, you can use the following command-line flags:
 | `--model_type MODEL_TYPE` | Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported. |
 | `--groupsize GROUPSIZE` | Group size. |
 | `--pre_layer PRE_LAYER` | The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. |
+| `--checkpoint CHECKPOINT` | The path to the quantized checkpoint file. If not specified, it will be automatically detected. |
 | `--monkey-patch` | Apply the monkey patch for using LoRAs with quantized models. |
 | `--quant_attn` | (triton) Enable quant attention. |
 | `--warmup_autotune` | (triton) Enable warmup autotune. |
 | `--fused_mlp` | (triton) Enable fused mlp. |
 
 #### FlexGen
 
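As a rough usage sketch (not part of this commit; the model name and file path are placeholders), the new flag points the loader at an explicit file instead of relying on auto-detection:

    python server.py --model llama-7b-4bit --wbits 4 --groupsize 128 --checkpoint models/llama-7b-4bit/llama-7b-4bit-128g.safetensors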
@@ -95,6 +95,9 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
 
 # Used to locate the .pt/.safetensors quantized file
 def find_quantized_model_file(model_name):
+    if shared.args.checkpoint:
+        return Path(shared.args.checkpoint)
+
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
     pt_path = None
     priority_name_list = [
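To make the fallback behaviour explicit, here is a minimal standalone sketch of the resulting control flow. It is not the actual module: the real function reads `shared.args` and searches a priority list of candidate names, whereas this version takes arguments and uses a simple glob.

    from pathlib import Path

    def find_quantized_model_file(model_name, checkpoint=None, model_dir='models'):
        # An explicit --checkpoint path short-circuits auto-detection entirely.
        if checkpoint:
            return Path(checkpoint)
        # Otherwise, fall back to looking for a .pt/.safetensors file next to the
        # model (simplified here; the real code walks a priority name list).
        path_to_model = Path(f'{model_dir}/{model_name}')
        for ext in ('.safetensors', '.pt'):
            candidates = sorted(path_to_model.glob(f'*{ext}'))
            if candidates:
                return candidates[0]
        return None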
@@ -131,6 +131,7 @@ parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized m
 parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
 parser.add_argument('--pre_layer', type=int, default=0, help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
+parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
 parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
 parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
 parser.add_argument('--warmup_autotune', action='store_true', help='(triton) Enable warmup autotune.')
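A side note on the argument definition: `type=str` with no `default` means the attribute is `None` when the flag is omitted, which is what lets the loader's `if shared.args.checkpoint:` guard fall through to auto-detection. A small self-contained check (the file path below is a placeholder):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str,
                        help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')

    # Omitted flag -> None (falsy), so auto-detection runs.
    assert parser.parse_args([]).checkpoint is None

    # Explicit flag -> the raw string, later wrapped in Path() by the loader.
    args = parser.parse_args(['--checkpoint', 'models/llama-7b-4bit/model.safetensors'])
    assert args.checkpoint == 'models/llama-7b-4bit/model.safetensors'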