Use pre-compiled python module for ExLlama (#2770)

jllllll 2023-06-24 18:24:17 -05:00 committed by GitHub
parent a70a2ac3be
commit bef67af23c
5 changed files with 41 additions and 16 deletions

modules/LoRA.py

@@ -20,10 +20,13 @@ def add_lora_to_model(lora_names):

 def add_lora_exllama(lora_names):
     try:
-        from repositories.exllama.lora import ExLlamaLora
+        from exllama.lora import ExLlamaLora
     except:
-        logger.error("Could not find the file repositories/exllama/lora.py. Make sure that exllama is cloned inside repositories/ and is up to date.")
-        return
+        try:
+            from repositories.exllama.lora import ExLlamaLora
+        except:
+            logger.error("Could not find the file repositories/exllama/lora.py. Make sure that exllama is cloned inside repositories/ and is up to date.")
+            return

     if len(lora_names) == 0:
         shared.model.generator.lora = None
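The new import logic prefers the pip-installed exllama wheel and only falls back to a source checkout under repositories/. A minimal standalone sketch of the same pattern (the import_exllama_lora helper name is illustrative, not part of this commit):

    def import_exllama_lora():
        """Prefer the pre-compiled exllama wheel; fall back to a clone in repositories/."""
        try:
            from exllama.lora import ExLlamaLora  # installed via pip (wheel)
        except ImportError:
            from repositories.exllama.lora import ExLlamaLora  # manual clone fallback
        return ExLlamaLora

Catching ImportError rather than a bare except:, as sketched here, would avoid silently swallowing unrelated errors raised while exllama itself imports.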

modules/exllama.py

@@ -3,12 +3,23 @@ from pathlib import Path

 from modules import shared
 from modules.logging_colors import logger
-from modules.relative_imports import RelativeImport

-with RelativeImport("repositories/exllama"):
-    from generator import ExLlamaGenerator
-    from model import ExLlama, ExLlamaCache, ExLlamaConfig
-    from tokenizer import ExLlamaTokenizer
+try:
+    from exllama.generator import ExLlamaGenerator
+    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
+    from exllama.tokenizer import ExLlamaTokenizer
+except:
+    logger.warning('Exllama module failed to load. Will attempt to load from repositories.')
+    try:
+        from modules.relative_imports import RelativeImport
+
+        with RelativeImport("repositories/exllama"):
+            from generator import ExLlamaGenerator
+            from model import ExLlama, ExLlamaCache, ExLlamaConfig
+            from tokenizer import ExLlamaTokenizer
+    except:
+        logger.error("Could not find repositories/exllama/. Make sure that exllama is cloned inside repositories/ and is up to date.")
+        raise


 class ExllamaModel:
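The fallback path relies on RelativeImport from modules/relative_imports.py to make the bare `from generator import ...` statements resolve against repositories/exllama. A minimal sketch of how such a context manager can be implemented, assuming the project's actual version may differ in detail:

    import sys
    from pathlib import Path


    class RelativeImport:
        """Temporarily prepend a directory to sys.path so its files import by bare module name."""

        def __init__(self, path):
            self.import_path = str(Path(path).resolve())

        def __enter__(self):
            sys.path.insert(0, self.import_path)

        def __exit__(self, exc_type, exc_value, traceback):
            sys.path.remove(self.import_path)

Inside the with block, repositories/exllama sits at the front of sys.path, so `from model import ExLlama` finds repositories/exllama/model.py; on exit the path entry is removed again.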

modules/exllama_hf.py

@@ -9,10 +9,19 @@ from transformers.modeling_outputs import CausalLMOutputWithPast

 from modules import shared
 from modules.logging_colors import logger
-from modules.relative_imports import RelativeImport

-with RelativeImport("repositories/exllama"):
-    from model import ExLlama, ExLlamaCache, ExLlamaConfig
+try:
+    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
+except:
+    logger.warning('Exllama module failed to load. Will attempt to load from repositories.')
+    try:
+        from modules.relative_imports import RelativeImport
+
+        with RelativeImport("repositories/exllama"):
+            from model import ExLlama, ExLlamaCache, ExLlamaConfig
+    except:
+        logger.error("Could not find repositories/exllama/. Make sure that exllama is cloned inside repositories/ and is up to date.")
+        raise


 class ExllamaHF(PreTrainedModel):
@@ -68,7 +77,7 @@ class ExllamaHF(PreTrainedModel):
         assert len(model_args) == 0 and len(kwargs) == 0, "extra args is currently not supported"
         if isinstance(pretrained_model_name_or_path, str):
             pretrained_model_name_or_path = Path(pretrained_model_name_or_path)
-
+
         pretrained_model_name_or_path = Path(f'{shared.args.model_dir}') / Path(pretrained_model_name_or_path)
         config = ExLlamaConfig(pretrained_model_name_or_path / 'config.json')
@@ -86,7 +95,7 @@ class ExllamaHF(PreTrainedModel):
         if shared.args.gpu_split:
             config.set_auto_map(shared.args.gpu_split)
             config.gpu_peer_fix = True
-
+
         # This slowes down a bit but align better with autogptq generation.
         # TODO: Should give user choice to tune the exllama config
         # config.fused_attn = False
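For context on the config calls in the hunk above (set_auto_map, gpu_peer_fix, fused_attn), this is roughly how the exllama module is driven end to end; a sketch based on exllama's own examples, with placeholder paths, not code from this commit:

    from pathlib import Path

    from exllama.generator import ExLlamaGenerator
    from exllama.model import ExLlama, ExLlamaCache, ExLlamaConfig
    from exllama.tokenizer import ExLlamaTokenizer

    model_dir = Path("models/llama-7b-gptq")  # placeholder model directory

    config = ExLlamaConfig(str(model_dir / "config.json"))
    config.model_path = str(next(model_dir.glob("*.safetensors")))  # quantized weights
    config.set_auto_map("17,24")   # VRAM per GPU in GB, comma-separated, as a string
    config.gpu_peer_fix = True     # the flag toggled in the hunk above
    # config.fused_attn = False    # the trade-off noted in the TODO comment above

    model = ExLlama(config)
    tokenizer = ExLlamaTokenizer(str(model_dir / "tokenizer.model"))
    cache = ExLlamaCache(model)
    generator = ExLlamaGenerator(model, tokenizer, cache)

    print(generator.generate_simple("Hello,", max_new_tokens=20))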

requirements.txt

@@ -22,4 +22,6 @@ https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/b
 llama-cpp-python==0.1.64; platform_system != "Windows"
 https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.64/llama_cpp_python-0.1.64-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.2.2/auto_gptq-0.2.2+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.2.2/auto_gptq-0.2.2+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux"
+https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.2.2/auto_gptq-0.2.2+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/exllama/releases/download/0.0.3/exllama-0.0.3+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/exllama/releases/download/0.0.3/exllama-0.0.3+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
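The `platform_system` and `platform_machine` conditions on the new wheel lines are PEP 508 environment markers: pip evaluates them against the running interpreter and skips any requirement whose marker is false, which is how each platform receives only its matching binary wheel. The marker values map directly onto Python's platform module:

    import platform

    # pip compares the requirement markers against these values at install time
    print(platform.system())   # platform_system  -> e.g. "Linux", "Windows"
    print(platform.machine())  # platform_machine -> e.g. "x86_64", "AMD64"

So the pre-compiled exllama wheel installs on 64-bit Linux and on Windows, while other platforms fall through to the repositories/ clone handled by the import fallbacks above.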

server.py

@@ -237,8 +237,8 @@ def create_model_menus():
             shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
             shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Make sure to inspect the .py files inside the model folder before loading it with this option enabled.')
             shared.gradio['gptq_for_llama_info'] = gr.Markdown('GPTQ-for-LLaMa is currently 2x faster than AutoGPTQ on some systems. It is installed by default with the one-click installers. Otherwise, it has to be installed manually following the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#installation-1).')
-            shared.gradio['exllama_info'] = gr.Markdown('ExLlama has to be installed manually. See the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).')
-            shared.gradio['exllama_HF_info'] = gr.Markdown('ExLlama_HF is a wrapper that lets you use ExLlama like a Transformers model, which means it can use the Transformers samplers. It\'s still a bit buggy, so feel free to help out by fixing issues.\n\nCheck out PR [#2777](https://github.com/oobabooga/text-generation-webui/pull/2777) for more details.')
+            shared.gradio['exllama_info'] = gr.Markdown('For more information, consult the [docs](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).')
+            shared.gradio['exllama_HF_info'] = gr.Markdown('ExLlama_HF is a wrapper that lets you use ExLlama like a Transformers model, which means it can use the Transformers samplers. It\'s a bit slower than the regular ExLlama and doesn\'t support LoRA.')

         with gr.Column():
             with gr.Row():
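The server.py hunk follows the webui's convention of stashing every Gradio component in the shared.gradio dict so later event wiring can look components up by key. A self-contained sketch of that pattern (the `components` dict and the component choices are illustrative, not code from this commit):

    import gradio as gr

    components = {}  # stand-in for the project's shared.gradio dict

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                components['exllama_info'] = gr.Markdown(
                    'For more information, consult the '
                    '[docs](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).')
                components['trust_remote_code'] = gr.Checkbox(label='trust-remote-code', value=False)

    demo.launch()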