From cc7b7ba153874cd3b5d6e2ab6fed23ade2dc7782 Mon Sep 17 00:00:00 2001
From: John Smith
Date: Mon, 11 Sep 2023 12:22:20 +0800
Subject: [PATCH] fix lora training with alpaca_lora_4bit (#3853)

---
 docs/GPTQ-models-(4-bit-mode).md  | 19 +++++++------------
 modules/monkey_patch_gptq_lora.py | 20 ++++++++------------
 modules/training.py               | 13 +++++++------
 3 files changed, 22 insertions(+), 30 deletions(-)

diff --git a/docs/GPTQ-models-(4-bit-mode).md b/docs/GPTQ-models-(4-bit-mode).md
index 428d7560..730e8324 100644
--- a/docs/GPTQ-models-(4-bit-mode).md
+++ b/docs/GPTQ-models-(4-bit-mode).md
@@ -163,22 +163,17 @@ This requires using a monkey patch that is supported by this web UI: https://git
 
 To use it:
 
-1. Clone `johnsmith0031/alpaca_lora_4bit` into the repositories folder:
+1. Install alpaca_lora_4bit using pip
 
 ```
-cd text-generation-webui/repositories
-git clone https://github.com/johnsmith0031/alpaca_lora_4bit
+git clone https://github.com/johnsmith0031/alpaca_lora_4bit.git
+cd alpaca_lora_4bit
+git fetch origin winglian-setup_pip
+git checkout winglian-setup_pip
+pip install .
 ```
 
-⚠️ I have tested it with the following commit specifically: `2f704b93c961bf202937b10aac9322b092afdce0`
-
-2. Install https://github.com/sterlind/GPTQ-for-LLaMa with this command:
-
-```
-pip install git+https://github.com/sterlind/GPTQ-for-LLaMa.git@lora_4bit
-```
-
-3. Start the UI with the `--monkey-patch` flag:
+2. Start the UI with the `--monkey-patch` flag:
 
 ```
 python server.py --model llama-7b-4bit-128g --listen --lora tloen_alpaca-lora-7b --monkey-patch
diff --git a/modules/monkey_patch_gptq_lora.py b/modules/monkey_patch_gptq_lora.py
index bf8d478d..3166bd33 100644
--- a/modules/monkey_patch_gptq_lora.py
+++ b/modules/monkey_patch_gptq_lora.py
@@ -1,39 +1,35 @@
 # Copied from https://github.com/johnsmith0031/alpaca_lora_4bit
 
-import sys
 from pathlib import Path
 
-sys.path.insert(0, str(Path("repositories/alpaca_lora_4bit")))
-
-import autograd_4bit
-from amp_wrapper import AMPWrapper
-from autograd_4bit import (
+import alpaca_lora_4bit.autograd_4bit as autograd_4bit
+from alpaca_lora_4bit.amp_wrapper import AMPWrapper
+from alpaca_lora_4bit.autograd_4bit import (
     Autograd4bitQuantLinear,
     load_llama_model_4bit_low_ram
 )
-from monkeypatch.peft_tuners_lora_monkey_patch import (
-    Linear4bitLt,
-    replace_peft_model_with_gptq_lora_model
+from alpaca_lora_4bit.models import Linear4bitLt
+from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
+    replace_peft_model_with_int4_lora_model
 )
 
 from modules import shared
 from modules.GPTQ_loader import find_quantized_model_file
 
-replace_peft_model_with_gptq_lora_model()
+replace_peft_model_with_int4_lora_model()
 
 
 def load_model_llama(model_name):
     config_path = str(Path(f'{shared.args.model_dir}/{model_name}'))
     model_path = str(find_quantized_model_file(model_name))
     model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path, groupsize=shared.args.groupsize, is_v1_model=False)
-    for n, m in model.named_modules():
+    for _, m in model.named_modules():
         if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
             if m.is_v1_model:
                 m.zeros = m.zeros.half()
             m.scales = m.scales.half()
             m.bias = m.bias.half()
 
-    autograd_4bit.use_new = True
     autograd_4bit.auto_switch = True
 
     model.half()
diff --git a/modules/training.py b/modules/training.py
index a993f6f0..30446900 100644
--- a/modules/training.py
+++ b/modules/training.py
@@ -269,10 +269,10 @@ def calc_trainable_parameters(model):
 
 def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, overlap_len: int, newline_favor_len: int, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str):
     if shared.args.monkey_patch:
-        from monkeypatch.peft_tuners_lora_monkey_patch import (
-            replace_peft_model_with_gptq_lora_model
+        from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
+            replace_peft_model_with_int4_lora_model
         )
-        replace_peft_model_with_gptq_lora_model()
+        replace_peft_model_with_int4_lora_model()
 
     global WANT_INTERRUPT
     WANT_INTERRUPT = False
@@ -512,11 +512,12 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
         return
 
     if shared.args.monkey_patch:
-        for n, m in lora_model.named_modules():
-            if '4bit' in str(type(m)):
+        from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
+        from alpaca_lora_4bit.models import Linear4bitLt
+        for _, m in lora_model.named_modules():
+            if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
                 if m.is_v1_model:
                     m.zeros = m.zeros.half()
-
                 m.scales = m.scales.half()
 
     class Tracked():
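
For reviewers, a minimal smoke-test sketch of the renamed entry points, assuming `alpaca_lora_4bit` was installed from the `winglian-setup_pip` branch as described in the docs change above. The import paths and function names are taken from this diff; the script itself is illustrative and not part of the patch.

```
# Import smoke test (illustrative, not part of the patch).
# Assumes: `pip install .` from the winglian-setup_pip branch of alpaca_lora_4bit.
from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
from alpaca_lora_4bit.models import Linear4bitLt
from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
    replace_peft_model_with_int4_lora_model
)

# The web UI calls this before the PEFT model is built, so that LoRA wraps the
# 4-bit quantized linear layers (see modules/monkey_patch_gptq_lora.py and do_train).
replace_peft_model_with_int4_lora_model()
print("alpaca_lora_4bit monkey-patch entry points import correctly")
```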