Fix Qwen1.5 in llamacpp_HF

oobabooga 2024-02-15 19:03:47 -08:00
parent 080f7132c0
commit b2b74c83a6


```diff
@@ -257,7 +257,7 @@ def llamacpp_HF_loader(model_name):
     path = Path(f'{shared.args.model_dir}/{model_name}')
 
     # Check if a HF tokenizer is available for the model
-    if all((path / file).exists() for file in ['tokenizer.model', 'tokenizer_config.json']):
+    if all((path / file).exists() for file in ['tokenizer.json', 'tokenizer_config.json']):
         logger.info(f'Using tokenizer from: \"{path}\"')
     else:
         logger.error("Could not load the model because a tokenizer in Transformers format was not found.")
```