From 804486214b5a1b07fc4c57255053593bb980d349 Mon Sep 17 00:00:00 2001
From: draff
Date: Fri, 10 Mar 2023 23:21:01 +0000
Subject: [PATCH] Re-implement --load-in-4bit and update --llama-bits arg
 description

---
 README.md         | 3 ++-
 modules/models.py | 8 ++++++--
 modules/shared.py | 3 ++-
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 5c560172..76774c0b 100644
--- a/README.md
+++ b/README.md
@@ -138,7 +138,8 @@ Optionally, you can use the following command-line flags:
 | `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
 | `--cpu` | Use the CPU to generate text.|
 | `--load-in-8bit` | Load the model with 8-bit precision.|
-| `--llama-bits` | Load LLaMA models with specified precision. 2, 3 and 4 bit are supported, use standard `--load-in-8bit` for 8bit precision. |
+| `--load-in-4bit` | Load the model with 4-bit precision. Currently only works with LLaMA.|
+| `--llama-bits` | Load pre-quantized models with the specified precision. 2, 3, 4 and 8-bit are supported. Currently only works with LLaMA. |
 | `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
 | `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
diff --git a/modules/models.py b/modules/models.py
index 3ec68f17..6c423a25 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -88,9 +88,13 @@ def load_model(model_name):
         return model, tokenizer
 
     # 4-bit LLaMA
-    elif shared.args.llama_bits>0:
+    elif shared.args.llama_bits>0 or shared.args.load_in_4bit:
         sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
-        bits = shared.args.llama_bits
+        if shared.args.load_in_4bit:
+            bits = 4
+        else:
+            bits = shared.args.llama_bits
+
         from llama import load_quant
 
 
diff --git a/modules/shared.py b/modules/shared.py
index 61d5a768..f3f46329 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -67,7 +67,8 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--llama-bits', type=int, default=0, help='Load LLaMA models with specified precision. 2, 3 and 4 bit are supported, use standard `--load-in-8bit` for 8bit precision.')
+parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
+parser.add_argument('--llama-bits', type=int, default=0, help='Load pre-quantized models with the specified precision. 2, 3, 4 and 8-bit are supported. Currently only works with LLaMA.')
parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
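
Note on flag precedence: in the modules/models.py hunk above, --load-in-4bit
wins when both flags are passed, since bits is forced to 4 before
--llama-bits is consulted. Below is a minimal, self-contained sketch of that
resolution logic, assuming the same argparse definitions as modules/shared.py;
the resolve_bits helper is illustrative only and not part of this patch.

    import argparse

    # Flag definitions matching modules/shared.py after this patch.
    parser = argparse.ArgumentParser()
    parser.add_argument('--load-in-4bit', action='store_true')
    parser.add_argument('--llama-bits', type=int, default=0)

    def resolve_bits(args):
        # Mirrors modules/models.py: --load-in-4bit forces 4-bit,
        # otherwise the --llama-bits value is used as-is.
        if args.load_in_4bit:
            return 4
        return args.llama_bits

    print(resolve_bits(parser.parse_args(['--load-in-4bit'])))                       # 4
    print(resolve_bits(parser.parse_args(['--llama-bits', '3'])))                    # 3
    print(resolve_bits(parser.parse_args(['--load-in-4bit', '--llama-bits', '3'])))  # 4

In other words, passing --load-in-4bit behaves the same as --llama-bits 4,
and overrides any other --llama-bits value given on the same command line.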