Mirror of https://github.com/oobabooga/text-generation-webui.git
Fix hang in tokenizer for AutoGPTQ llama models. (#2399)
commit 60ae80cf28
parent 2f811b1bdf
@@ -114,7 +114,7 @@ def load_tokenizer(model_name, model):
     tokenizer = None
     if shared.model_type == 'gpt4chan' and Path(f"{shared.args.model_dir}/gpt-j-6B/").exists():
         tokenizer = AutoTokenizer.from_pretrained(Path(f"{shared.args.model_dir}/gpt-j-6B/"))
-    elif type(model) is transformers.LlamaForCausalLM:
+    elif type(model) is transformers.LlamaForCausalLM or "LlamaGPTQForCausalLM" in str(type(model)):
         # Try to load an universal LLaMA tokenizer
         if shared.model_type not in ['llava', 'oasst']:
             for p in [Path(f"{shared.args.model_dir}/llama-tokenizer/"), Path(f"{shared.args.model_dir}/oobabooga_llama-tokenizer/")]:
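
For context, a minimal sketch (not part of the commit) of why the added condition helps: an AutoGPTQ-quantized LLaMA model is wrapped in AutoGPTQ's own class rather than transformers.LlamaForCausalLM, so the exact-type check alone never matched and these models skipped the universal LLaMA tokenizer branch. The helper name is_llama_model below is hypothetical.

    # Minimal sketch (assumption, not part of the commit): how the new condition
    # recognizes both plain Transformers LLaMA models and AutoGPTQ-wrapped ones.
    import transformers

    def is_llama_model(model) -> bool:
        # Exact type check matches models loaded as transformers.LlamaForCausalLM.
        if type(model) is transformers.LlamaForCausalLM:
            return True

        # AutoGPTQ wraps quantized LLaMA models in its own class, whose repr looks
        # roughly like "<class 'auto_gptq...LlamaGPTQForCausalLM'>" and therefore
        # fails the exact-type check above, so match the class name as a substring.
        return "LlamaGPTQForCausalLM" in str(type(model))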