fix lora training with alpaca_lora_4bit (#3853)

John Smith 2023-09-11 12:22:20 +08:00 committed by GitHub
parent 15e9b8c915
commit cc7b7ba153
3 changed files with 22 additions and 30 deletions


@@ -163,22 +163,17 @@ This requires using a monkey patch that is supported by this web UI: https://git
 To use it:
 
-1. Clone `johnsmith0031/alpaca_lora_4bit` into the repositories folder:
+1. Install alpaca_lora_4bit using pip
 
 ```
-cd text-generation-webui/repositories
-git clone https://github.com/johnsmith0031/alpaca_lora_4bit
+git clone https://github.com/johnsmith0031/alpaca_lora_4bit.git
+cd alpaca_lora_4bit
+git fetch origin winglian-setup_pip
+git checkout winglian-setup_pip
+pip install .
 ```
 
-⚠️ I have tested it with the following commit specifically: `2f704b93c961bf202937b10aac9322b092afdce0`
-
-2. Install https://github.com/sterlind/GPTQ-for-LLaMa with this command:
-
-```
-pip install git+https://github.com/sterlind/GPTQ-for-LLaMa.git@lora_4bit
-```
-
-3. Start the UI with the `--monkey-patch` flag:
+2. Start the UI with the `--monkey-patch` flag:
 
 ```
 python server.py --model llama-7b-4bit-128g --listen --lora tloen_alpaca-lora-7b --monkey-patch
 ```

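A quick way to confirm that the `pip install .` step above succeeded is to import the modules this commit relies on. The sketch below is only a sanity check, not part of the web UI; it assumes the winglian-setup_pip branch installs the package under the name `alpaca_lora_4bit`, and the module paths are the ones imported elsewhere in this commit.

```python
# Sanity check after `pip install .` (assumption: the package installs under the
# name "alpaca_lora_4bit"). Every module listed here is imported somewhere in
# this commit; an ImportError means the installation is broken.
import importlib

for mod in (
    "alpaca_lora_4bit.autograd_4bit",
    "alpaca_lora_4bit.amp_wrapper",
    "alpaca_lora_4bit.models",
    "alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch",
):
    importlib.import_module(mod)

print("alpaca_lora_4bit is installed and importable")
```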

@@ -1,39 +1,35 @@
 # Copied from https://github.com/johnsmith0031/alpaca_lora_4bit
-import sys
 from pathlib import Path
 
-sys.path.insert(0, str(Path("repositories/alpaca_lora_4bit")))
-
-import autograd_4bit
-from amp_wrapper import AMPWrapper
-from autograd_4bit import (
+import alpaca_lora_4bit.autograd_4bit as autograd_4bit
+from alpaca_lora_4bit.amp_wrapper import AMPWrapper
+from alpaca_lora_4bit.autograd_4bit import (
     Autograd4bitQuantLinear,
     load_llama_model_4bit_low_ram
 )
-from monkeypatch.peft_tuners_lora_monkey_patch import (
-    Linear4bitLt,
-    replace_peft_model_with_gptq_lora_model
+from alpaca_lora_4bit.models import Linear4bitLt
+from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
+    replace_peft_model_with_int4_lora_model
 )
 
 from modules import shared
 from modules.GPTQ_loader import find_quantized_model_file
 
-replace_peft_model_with_gptq_lora_model()
+replace_peft_model_with_int4_lora_model()
 
 
 def load_model_llama(model_name):
     config_path = str(Path(f'{shared.args.model_dir}/{model_name}'))
     model_path = str(find_quantized_model_file(model_name))
     model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path, groupsize=shared.args.groupsize, is_v1_model=False)
-    for n, m in model.named_modules():
+    for _, m in model.named_modules():
         if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
             if m.is_v1_model:
                 m.zeros = m.zeros.half()
             m.scales = m.scales.half()
             m.bias = m.bias.half()
 
-    autograd_4bit.use_new = True
     autograd_4bit.auto_switch = True
 
     model.half()

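The patched loader above is normally driven by the web UI itself, but it can also be exercised directly. The sketch below is a rough usage example, not code from this commit: it assumes the file lives at `modules/monkey_patch_gptq_lora.py` and that `load_model_llama` still returns the `(model, tokenizer)` pair built by `load_llama_model_4bit_low_ram` (the hunk is truncated before the return statement). The model folder name is the example used in the docs above.

```python
# Rough usage sketch, run from the web UI root directory.
# Assumptions (not shown in the hunk): the module path modules/monkey_patch_gptq_lora.py,
# a return value of (model, tokenizer), and a 4-bit model folder named
# "llama-7b-4bit-128g" under shared.args.model_dir.
from modules.monkey_patch_gptq_lora import load_model_llama

model, tokenizer = load_model_llama("llama-7b-4bit-128g")
print(model.__class__.__name__, tokenizer.__class__.__name__)
```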

@@ -269,10 +269,10 @@ def calc_trainable_parameters(model):
 def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, overlap_len: int, newline_favor_len: int, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str):
 
     if shared.args.monkey_patch:
-        from monkeypatch.peft_tuners_lora_monkey_patch import (
-            replace_peft_model_with_gptq_lora_model
+        from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
+            replace_peft_model_with_int4_lora_model
         )
-        replace_peft_model_with_gptq_lora_model()
+        replace_peft_model_with_int4_lora_model()
 
     global WANT_INTERRUPT
     WANT_INTERRUPT = False
@@ -512,11 +512,12 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
         return
 
     if shared.args.monkey_patch:
-        for n, m in lora_model.named_modules():
-            if '4bit' in str(type(m)):
+        from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
+        from alpaca_lora_4bit.models import Linear4bitLt
+
+        for _, m in lora_model.named_modules():
+            if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
                 if m.is_v1_model:
                     m.zeros = m.zeros.half()
                 m.scales = m.scales.half()
 
     class Tracked():
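The training-side change mirrors the loader: the old code matched modules by looking for the substring `'4bit'` in the type name, while the new code imports the concrete classes and checks with `isinstance`. The sketch below illustrates the difference with stand-in classes; they are placeholders, not the real alpaca_lora_4bit implementations.

```python
# Stand-in classes to illustrate the string check vs. isinstance(); these are
# placeholders, not the real alpaca_lora_4bit classes.
class Autograd4bitQuantLinear:
    is_v1_model = True


class QuantLinearLora(Autograd4bitQuantLinear):
    # Hypothetical subclass whose name no longer contains "4bit".
    pass


m = QuantLinearLora()

# Old-style check: depends on the literal text of the class name.
print('4bit' in str(type(m)))                   # False -> module would be skipped

# New-style check: follows the class hierarchy regardless of naming.
print(isinstance(m, Autograd4bitQuantLinear))   # True -> module is handled
```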