diff --git a/modules/evaluate.py b/modules/evaluate.py
index 8044e203..a569674e 100644
--- a/modules/evaluate.py
+++ b/modules/evaluate.py
@@ -59,13 +59,13 @@ def calculate_perplexity(models, input_dataset, stride, _max_length):
 
     for model in models:
         if is_in_past_evaluations(model, input_dataset, stride, _max_length):
-            cumulative_log += f"{model} has already been tested. Ignoring.\n\n"
+            cumulative_log += f"`{model}` has already been tested. Ignoring.\n\n"
             yield cumulative_log
             continue
 
         if model != 'current model':
             try:
-                yield cumulative_log + f"Loading {model}...\n\n"
+                yield cumulative_log + f"Loading `{model}`...\n\n"
                 model_settings = get_model_metadata(model)
                 shared.settings.update({k: v for k, v in model_settings.items() if k in shared.settings}) # hijacking the interface defaults
                 update_model_parameters(model_settings) # hijacking the command-line arguments
@@ -73,11 +73,11 @@ def calculate_perplexity(models, input_dataset, stride, _max_length):
                 unload_model()
                 shared.model, shared.tokenizer = load_model(shared.model_name)
             except:
-                cumulative_log += f"Failed to load {model}. Moving on.\n\n"
+                cumulative_log += f"Failed to load `{model}`. Moving on.\n\n"
                 yield cumulative_log
                 continue
 
-        cumulative_log += f"Processing {shared.model_name}...\n\n"
+        cumulative_log += f"Processing `{shared.model_name}`...\n\n"
         yield cumulative_log + "Tokenizing the input dataset...\n\n"
         encodings = encode(text, add_special_tokens=False)
         seq_len = encodings.shape[1]
@@ -115,7 +115,7 @@ def calculate_perplexity(models, input_dataset, stride, _max_length):
 
     ppl = torch.exp(torch.stack(nlls).mean())
    add_entry_to_past_evaluations(float(ppl), shared.model_name, input_dataset, stride, _max_length)
     save_past_evaluations(past_evaluations)
-    cumulative_log += f"The perplexity for {shared.model_name} is: {float(ppl)}\n\n"
+    cumulative_log += f"The perplexity for `{shared.model_name}` is: {float(ppl)}\n\n"
     yield cumulative_log
 
diff --git a/modules/models_settings.py b/modules/models_settings.py
index 9b9e9bd0..1cf26902 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -220,4 +220,4 @@ def save_model_settings(model, state):
     with open(p, 'w') as f:
         f.write(output)
 
-    yield (f"Settings for {model} saved to {p}")
+    yield (f"Settings for `{model}` saved to `{p}`.")