Commit 7f5370a272 ("Minor fixes/cosmetics"), parent d826bc5d1b
Mirror of https://github.com/oobabooga/text-generation-webui.git
@@ -156,7 +156,7 @@ text-generation-webui

 In the "Model" tab of the UI, those models can be automatically downloaded from Hugging Face. You can also download them via the command line with `python download-model.py organization/model`.

-* GGUF models are a single file and should be placed directly into `models`. Example:
+* GGML/GGUF models are a single file and should be placed directly into `models`. Example:

 ```
 text-generation-webui
@@ -258,7 +258,7 @@ Optionally, you can use the following command-line flags:

 | `--quant_type QUANT_TYPE` | quant_type for 4-bit. Valid options: nf4, fp4. |
 | `--use_double_quant` | use_double_quant for 4-bit. |

-#### GGUF (for llama.cpp and ctransformers)
+#### GGML/GGUF (for llama.cpp and ctransformers)

 | Flag | Description |
 |-------------|-------------|
@@ -83,7 +83,7 @@ class ModelDownloader:

         is_ggml = re.match(r".*ggml.*\.bin", fname)
         is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
         is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
-        if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
+        if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_ggml, is_tokenizer, is_text)):
             if 'lfs' in dict[i]:
                 sha256.append([fname, dict[i]['lfs']['oid']])
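For context on the one-word fix above, here is a small sketch of how these filename regexes classify a repository listing. Only the three regexes shown in the hunk are copied verbatim; the GGUF pattern and the sample filenames are assumptions for illustration.

```python
# Sketch of the filename classification used by ModelDownloader.
# The ggml/tokenizer/text regexes are copied from the hunk above;
# the GGUF pattern and sample names are assumed for illustration.
import re

samples = [
    "llama-2-7b.ggmlv3.q4_K_M.bin",   # GGML single-file model
    "llama-2-7b.Q4_K_M.gguf",         # GGUF single-file model
    "tokenizer.model",
    "config.json",
]

for fname in samples:
    is_gguf = re.match(r".*\.gguf", fname)  # assumed pattern
    is_ggml = re.match(r".*ggml.*\.bin", fname)
    is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
    is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
    # Before this commit, is_ggml was missing from the tuple below,
    # so GGML .bin files were skipped by the downloader.
    wanted = any((is_gguf, is_ggml, is_tokenizer, is_text))
    print(f"{fname}: {'download' if wanted else 'skip'}")
```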
@@ -37,6 +37,7 @@ def llama_cpp_lib(model_file: Union[str, Path] = None):

         gguf_model = is_gguf(model_file)
     else:
         gguf_model = True

     if shared.args.cpu or llama_cpp_cuda is None:
         return llama_cpp if gguf_model else llama_cpp_ggml
     else:
@@ -37,6 +37,7 @@ def llama_cpp_lib(model_file: Union[str, Path] = None):

         gguf_model = is_gguf(model_file)
     else:
         gguf_model = True

     if shared.args.cpu or llama_cpp_cuda is None:
         return llama_cpp if gguf_model else llama_cpp_ggml
     else:

@@ -58,8 +59,8 @@ class LlamaCppModel:

     @classmethod
     def from_pretrained(self, path):

-        Llama = llama_cpp_lib(str(path)).Llama
-        LlamaCache = llama_cpp_lib(str(path)).LlamaCache
+        Llama = llama_cpp_lib(path).Llama
+        LlamaCache = llama_cpp_lib(path).LlamaCache

         result = self()
         cache_capacity = 0

@@ -94,7 +95,7 @@ class LlamaCppModel:

             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
         }

-        if not is_gguf(str(path)):
+        if not is_gguf(path):
             ggml_params = {
                 'n_gqa': shared.args.n_gqa or None,
                 'rms_norm_eps': shared.args.rms_norm_eps or None,
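Only part of `llama_cpp_lib()` is visible in these hunks. The following is a minimal sketch of the selection pattern it implements, assuming module names taken from the wheels in requirements.txt; the non-CPU branch is not shown in the diff and is a guess.

```python
# Minimal sketch of the backend selection shown above. The non-CPU branch
# and the returned names are assumptions; only the GGUF check and the CPU
# branch appear in this diff.
from pathlib import Path
from typing import Optional, Union


def looks_like_gguf(model_file: Union[str, Path]) -> bool:
    # Same magic-byte check as the is_gguf() helper in modules/utils.py
    with open(Path(model_file), "rb") as f:
        return f.read(4) == b"GGUF"


def pick_backend(model_file: Optional[Union[str, Path]], cpu_only: bool, cuda_available: bool) -> str:
    # With no file to inspect, GGUF (the current format) is assumed.
    gguf_model = looks_like_gguf(model_file) if model_file is not None else True

    if cpu_only or not cuda_available:
        # CPU wheels: llama-cpp-python for GGUF, the *_ggml build for GGML
        return "llama_cpp" if gguf_model else "llama_cpp_ggml"

    # Hypothetical: the diff cuts off here; presumably the CUDA builds are returned.
    return "llama_cpp_cuda" if gguf_model else "llama_cpp_ggml_cuda"
```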
@@ -126,10 +126,14 @@ def get_datasets(path: str, ext: str):

 def get_available_chat_styles():
     return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)


-# Determines if a llama.cpp model is in GGUF format
-# Copied from ctransformers utils.py
 def is_gguf(path: Union[str, Path]) -> bool:
+    '''
+    Determines if a llama.cpp model is in GGUF format
+    Copied from ctransformers utils.py
+    '''
     path = str(Path(path).resolve())
     with open(path, "rb") as f:
         magic = f.read(4)

     return magic == "GGUF".encode()
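As a quick sanity check of the helper above, a standalone sketch that copies `is_gguf()` and exercises it on a throwaway file; the file name and the non-GGUF bytes are arbitrary.

```python
# Standalone copy of is_gguf() plus a tiny self-test with a temporary file.
from pathlib import Path
from typing import Union
import tempfile


def is_gguf(path: Union[str, Path]) -> bool:
    '''
    Determines if a llama.cpp model is in GGUF format
    Copied from ctransformers utils.py
    '''
    path = str(Path(path).resolve())
    with open(path, "rb") as f:
        magic = f.read(4)

    return magic == "GGUF".encode()


with tempfile.TemporaryDirectory() as tmp:
    fake = Path(tmp) / "model.gguf"
    fake.write_bytes(b"GGUF" + b"\x00" * 16)  # GGUF files start with this 4-byte magic
    assert is_gguf(fake)
    fake.write_bytes(b"\x00\x00\x00\x00")     # any other leading bytes are treated as non-GGUF
    assert not is_gguf(fake)
```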
@@ -22,19 +22,26 @@ tensorboard

 tqdm
 wandb

+# bitsandbytes
 bitsandbytes==0.41.1; platform_system != "Windows"
 https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"

+# AutoGPTQ
 https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.4.2/auto_gptq-0.4.2+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

+# ExLlama
 https://github.com/jllllll/exllama/releases/download/0.0.10/exllama-0.0.10+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/exllama/releases/download/0.0.10/exllama-0.0.10+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

 # llama-cpp-python without GPU support
 llama-cpp-python==0.1.79; platform_system != "Windows"
 https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.79/llama_cpp_python-0.1.79-cp310-cp310-win_amd64.whl; platform_system == "Windows"

 # llama-cpp-python with CUDA support
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.79+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

 # llama-cpp-python with GGML support
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
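The per-platform lines above rely on PEP 508 environment markers. As a rough illustration, a marker expression can be evaluated against the running interpreter with the `packaging` library (which pip itself uses for this purpose); the marker strings below are copied from the requirements file.

```python
# Rough illustration of how pip decides which requirements.txt lines apply
# on the current platform. Markers are copied from the file above.
from packaging.markers import Marker

markers = [
    'platform_system != "Windows"',
    'platform_system == "Windows"',
    'platform_system == "Linux" and platform_machine == "x86_64"',
]
for m in markers:
    # Marker.evaluate() checks the expression against the current environment.
    print(m, "->", Marker(m).evaluate())
```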