diff --git a/README.md b/README.md
index f36a8b9b..f527cd55 100644
--- a/README.md
+++ b/README.md
@@ -156,7 +156,7 @@ text-generation-webui
 
 In the "Model" tab of the UI, those models can be automatically downloaded from Hugging Face. You can also download them via the command-line with `python download-model.py organization/model`.
 
-* GGUF models are a single file and should be placed directly into `models`. Example:
+* GGML/GGUF models are a single file and should be placed directly into `models`. Example:
 
 ```
 text-generation-webui
@@ -258,7 +258,7 @@ Optionally, you can use the following command-line flags:
 | `--quant_type QUANT_TYPE` | quant_type for 4-bit. Valid options: nf4, fp4. |
 | `--use_double_quant` | use_double_quant for 4-bit. |
 
-#### GGUF (for llama.cpp and ctransformers)
+#### GGML/GGUF (for llama.cpp and ctransformers)
 
 | Flag | Description |
 |-------------|-------------|
diff --git a/download-model.py b/download-model.py
index 3bb4a39b..b36865d7 100644
--- a/download-model.py
+++ b/download-model.py
@@ -83,7 +83,7 @@ class ModelDownloader:
             is_ggml = re.match(r".*ggml.*\.bin", fname)
             is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
             is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
-            if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
+            if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_ggml, is_tokenizer, is_text)):
                 if 'lfs' in dict[i]:
                     sha256.append([fname, dict[i]['lfs']['oid']])
 
diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index a2dcb34b..ce8c6d15 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -37,6 +37,7 @@ def llama_cpp_lib(model_file: Union[str, Path] = None):
         gguf_model = is_gguf(model_file)
     else:
         gguf_model = True
+
     if shared.args.cpu or llama_cpp_cuda is None:
         return llama_cpp if gguf_model else llama_cpp_ggml
     else:
@@ -205,7 +206,7 @@ class LlamacppHF(PreTrainedModel):
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'logits_all': True,
         }
-
+
         if not is_gguf(model_file):
             ggml_params = {
                 'n_gqa': shared.args.n_gqa or None,
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 4908ecb7..12aa3a4f 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -37,6 +37,7 @@ def llama_cpp_lib(model_file: Union[str, Path] = None):
         gguf_model = is_gguf(model_file)
     else:
         gguf_model = True
+
     if shared.args.cpu or llama_cpp_cuda is None:
         return llama_cpp if gguf_model else llama_cpp_ggml
     else:
@@ -58,8 +59,8 @@ class LlamaCppModel:
 
     @classmethod
     def from_pretrained(self, path):
-        Llama = llama_cpp_lib(str(path)).Llama
-        LlamaCache = llama_cpp_lib(str(path)).LlamaCache
+        Llama = llama_cpp_lib(path).Llama
+        LlamaCache = llama_cpp_lib(path).LlamaCache
 
         result = self()
         cache_capacity = 0
@@ -93,8 +94,8 @@
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
         }
-
-        if not is_gguf(str(path)):
+
+        if not is_gguf(path):
             ggml_params = {
                 'n_gqa': shared.args.n_gqa or None,
                 'rms_norm_eps': shared.args.rms_norm_eps or None,
diff --git a/modules/utils.py b/modules/utils.py
index 3862817d..15dbd9dd 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -126,10 +126,14 @@ def get_datasets(path: str, ext: str):
 def get_available_chat_styles():
     return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
 
-# Determines if a llama.cpp model is in GGUF format
-# Copied from ctransformers utils.py
+
 def is_gguf(path: Union[str, Path]) -> bool:
+    '''
+    Determines if a llama.cpp model is in GGUF format
+    Copied from ctransformers utils.py
+    '''
     path = str(Path(path).resolve())
     with open(path, "rb") as f:
         magic = f.read(4)
+
     return magic == "GGUF".encode()
diff --git a/requirements.txt b/requirements.txt
index 0c6aeb1b..7311370b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,19 +22,26 @@ tensorboard
 tqdm
 wandb
 
+# bitsandbytes
 bitsandbytes==0.41.1; platform_system != "Windows"
 https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+
+# AutoGPTQ
 https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+
+# ExLlama
 https://github.com/jllllll/exllama/releases/download/0.0.10/exllama-0.0.10+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/exllama/releases/download/0.0.10/exllama-0.0.10+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
 
 # llama-cpp-python without GPU support
 llama-cpp-python==0.1.79; platform_system != "Windows"
 https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.79/llama_cpp_python-0.1.79-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+
 # llama-cpp-python with CUDA support
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+
 # llama-cpp-python with GGML support
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
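
For reference, here is a minimal standalone sketch (not part of the diff above) of the check this change leans on: `is_gguf()` reads the first four bytes of a model file and treats the `GGUF` magic as the marker for the new llama.cpp format, so anything else (for example an older GGML `.bin`) falls back to the GGML build of llama-cpp-python. The model file names in the usage loop are placeholders, not files shipped with the repo.

```python
from pathlib import Path
from typing import Union


def is_gguf(path: Union[str, Path]) -> bool:
    # GGUF files start with the ASCII magic b"GGUF"; older GGML files do not.
    path = str(Path(path).resolve())
    with open(path, "rb") as f:
        magic = f.read(4)

    return magic == "GGUF".encode()


# Hypothetical usage: decide which llama-cpp-python package to use for a
# local model file. The file names below are placeholders.
for name in ("models/llama-2-13b.Q4_K_M.gguf", "models/llama-13b.ggmlv3.q4_0.bin"):
    if Path(name).exists():
        backend = "llama_cpp" if is_gguf(name) else "llama_cpp_ggml"
        print(f"{name} -> {backend}")
```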