From 61611197e0b0596f5e8b25345e693c339139fc1f Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 26 Jan 2023 02:12:53 -0300
Subject: [PATCH] Add --verbose option (oops)

---
 README.md | 1 +
 server.py | 4 ++++
 2 files changed, 5 insertions(+)

diff --git a/README.md b/README.md
index e7be8434..cf72a914 100644
--- a/README.md
+++ b/README.md
@@ -135,6 +135,7 @@ Optionally, you can use the following command-line flags:
 | `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example.|
 | `--listen` | Make the web UI reachable from your local network.|
 | `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
+| `--verbose` | Print the prompts to the terminal. |
 
 Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).
 
diff --git a/server.py b/server.py
index 8a080206..5f80ae4e 100644
--- a/server.py
+++ b/server.py
@@ -34,6 +34,7 @@ parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example.')
 parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
 parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
+parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
 args = parser.parse_args()
 
 if (args.chat or args.cai_chat) and not args.no_stream:
@@ -164,6 +165,9 @@ def formatted_outputs(reply, model_name):
 def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None, stopping_string=None):
     global model, tokenizer, model_name, loaded_preset, preset
 
+    if args.verbose:
+        print(f"\n\n{question}\n--------------------\n")
+
     if selected_model != model_name:
         model_name = selected_model
         model = tokenizer = None
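
Usage note: with this patch applied, the flag is passed at launch, e.g. `python server.py --verbose`, and each prompt is then printed to the terminal before a reply is generated. For reference, here is a minimal, self-contained sketch of the same argparse pattern the patch relies on; the `ask()` helper and its reply string are illustrative stand-ins, not part of the patch:

```python
import argparse

parser = argparse.ArgumentParser()
# action='store_true' makes --verbose a boolean flag: args.verbose is
# False by default and becomes True only when --verbose is passed.
parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
args = parser.parse_args()

def ask(question):
    # Mirrors the check added to generate_reply(): echo the prompt
    # (followed by a separator line) before producing a reply.
    if args.verbose:
        print(f"\n\n{question}\n--------------------\n")
    return f"reply to: {question}"

print(ask("Hello there"))
```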