From 4e34ae05879ce288eeb5649bca62be27992ab348 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 6 Feb 2024 08:22:08 -0800
Subject: [PATCH] Minor logging improvements

---
 modules/models.py | 10 +++++-----
 server.py         |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index 5929e868..038669f3 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -54,7 +54,7 @@ sampler_hijack.hijack_samplers()
 
 
 def load_model(model_name, loader=None):
-    logger.info(f"Loading {model_name}")
+    logger.info(f"Loading \"{model_name}\"")
     t0 = time.time()
 
     shared.is_seq2seq = False
@@ -246,7 +246,7 @@ def llamacpp_loader(model_name):
     else:
         model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf'))[0]
 
-    logger.info(f"llama.cpp weights detected: {model_file}")
+    logger.info(f"llama.cpp weights detected: \"{model_file}\"")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
     return model, tokenizer
 
@@ -257,7 +257,7 @@ def llamacpp_HF_loader(model_name):
     for fname in [model_name, "oobabooga_llama-tokenizer", "llama-tokenizer"]:
         path = Path(f'{shared.args.model_dir}/{fname}')
         if all((path / file).exists() for file in ['tokenizer_config.json', 'special_tokens_map.json', 'tokenizer.model']):
-            logger.info(f'Using tokenizer from: {path}')
+            logger.info(f'Using tokenizer from: \"{path}\"')
             break
     else:
         logger.error("Could not load the model because a tokenizer in transformers format was not found. Please download oobabooga/llama-tokenizer.")
@@ -298,7 +298,7 @@ def ctransformers_loader(model_name):
         logger.error("Could not find a model for ctransformers.")
         return None, None
 
-    logger.info(f'ctransformers weights detected: {model_file}')
+    logger.info(f'ctransformers weights detected: \"{model_file}\"')
     model, tokenizer = ctrans.from_pretrained(model_file)
     return model, tokenizer
 
@@ -393,7 +393,7 @@ def HQQ_loader(model_name):
     from hqq.core.quantize import HQQBackend, HQQLinear
     from hqq.engine.hf import HQQModelForCausalLM
 
-    logger.info(f"Loading HQQ model with backend: {shared.args.hqq_backend}")
+    logger.info(f"Loading HQQ model with backend: \"{shared.args.hqq_backend}\"")
     model_dir = Path(f'{shared.args.model_dir}/{model_name}')
     model = HQQModelForCausalLM.from_quantized(str(model_dir))
 
diff --git a/server.py b/server.py
index 6ef9005d..681fe4e7 100644
--- a/server.py
+++ b/server.py
@@ -187,7 +187,7 @@ if __name__ == "__main__":
     settings_file = Path('settings.json')
 
     if settings_file is not None:
-        logger.info(f"Loading settings from {settings_file}")
+        logger.info(f"Loading settings from \"{settings_file}\"")
         file_contents = open(settings_file, 'r', encoding='utf-8').read()
         new_settings = json.loads(file_contents) if settings_file.suffix == "json" else yaml.safe_load(file_contents)
         shared.settings.update(new_settings)