mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2024-11-22 08:07:56 +01:00
ctransformers: gguf support (#3685)
This commit is contained in:
parent
21058c37f7
commit
960980247f
@@ -280,7 +280,16 @@ def ctransformers_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.bin'))[0]
+        entries = Path(f'{shared.args.model_dir}/{model_name}')
+        gguf = list(entries.glob('*.gguf'))
+        bin = list(entries.glob('*.bin'))
+        if len(gguf) > 0:
+            model_file = gguf[0]
+        elif len(bin) > 0:
+            model_file = bin[0]
+        else:
+            logger.error("Could not find a model for ctransformers.")
+            return None, None

     logger.info(f'ctransformers weights detected: {model_file}')
     model, tokenizer = ctrans.from_pretrained(model_file)
@@ -41,4 +41,4 @@
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.0/gptq_for_llama-0.1.0+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"

 # ctransformers
-https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.23+cu117-py3-none-any.whl
+https://github.com/jllllll/ctransformers-cuBLAS-wheels/releases/download/AVX2/ctransformers-0.2.24+cu117-py3-none-any.whl
Loading…
Reference in New Issue
Block a user