mirror of https://github.com/oobabooga/text-generation-webui.git

Merge branch 'oobabooga:main' into patch-1

commit ff290c6f66
```diff
@@ -135,6 +135,7 @@ Optionally, you can use the following command-line flags:
 | `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example.|
 | `--listen` | Make the web UI reachable from your local network.|
 | `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
+| `--verbose` | Print the prompts to the terminal. |

 Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).

```
```diff
@@ -34,6 +34,7 @@ parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example.')
 parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
 parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
+parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
 args = parser.parse_args()

 if (args.chat or args.cai_chat) and not args.no_stream:
```
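For context, a minimal, self-contained sketch (not the project's own code) of how an argparse `store_true` flag such as the new `--verbose` option behaves: it defaults to `False` and becomes `True` only when the flag is passed on the command line.

```python
# Illustrative sketch only: the store_true pattern used in the hunk above.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true',
                    help='Print the prompts to the terminal.')

# Simulate launching with and without the flag.
print(parser.parse_args(['--verbose']).verbose)  # True
print(parser.parse_args([]).verbose)             # False
```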
```diff
@@ -164,6 +165,9 @@ def formatted_outputs(reply, model_name):
 def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None, stopping_string=None):
     global model, tokenizer, model_name, loaded_preset, preset

+    if args.verbose:
+        print(f"\n\n{question}\n--------------------\n")
+
     if selected_model != model_name:
         model_name = selected_model
         model = tokenizer = None
```
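Taken together, these hunks document the new flag in the README's command-line table, register it with the argument parser, and make `generate_reply()` echo each incoming prompt, followed by a separator line, to the terminal before generation whenever `--verbose` is set.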