Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-26 01:30:20 +01:00)

Add --model-menu option

commit 7dfbe54f42 · parent 86c10c6f0c
@@ -203,6 +203,7 @@ Optionally, you can use the following command-line flags:
 | `--lora LORA` | Name of the LoRA to apply to the model by default. |
 | `--model-dir MODEL_DIR` | Path to directory with all the models. |
 | `--lora-dir LORA_DIR` | Path to directory with all the loras. |
+| `--model-menu` | Show a model menu in the terminal when the web UI is first launched. |
 | `--no-stream` | Don't stream the text output in real time. |
 | `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example. If you create a file called `settings.json`, this file will be loaded by default without the need to use the `--settings` flag. |
 | `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
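As a quick usage sketch (not part of the commit), the new flag is passed on the launch command line like any other flag in the table above; the invocation below assumes the usual `python server.py` entry point:

    python server.py --model-menu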
@@ -90,6 +90,7 @@ parser.add_argument('--model', type=str, help='Name of the model to load by defa
 parser.add_argument('--lora', type=str, help='Name of the LoRA to apply to the model by default.')
 parser.add_argument("--model-dir", type=str, default='models/', help="Path to directory with all the models")
 parser.add_argument("--lora-dir", type=str, default='loras/', help="Path to directory with all the loras")
+parser.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
 parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time.')
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example. If you create a file called settings.json, this file will be loaded by default without the need to use the --settings flag.')
 parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
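For readers unfamiliar with these argparse patterns, here is a minimal self-contained sketch (illustrative only, not the project's actual module) of how a store_true flag like `--model-menu` and a nargs='+' flag like `--extensions` behave when parsed:

# Minimal illustrative sketch of the argparse behaviour used in the diff above.
# Only the flag names mirror the commit; everything else here is hypothetical.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='Name of the model to load by default.')
parser.add_argument('--model-menu', action='store_true',
                    help='Show a model menu in the terminal when the web UI is first launched.')
parser.add_argument('--extensions', type=str, nargs='+',
                    help='The list of extensions to load.')

# argparse maps '--model-menu' to the attribute name 'model_menu'.
args = parser.parse_args(['--model-menu', '--extensions', 'gallery', 'api'])
print(args.model_menu)  # True  (store_true flags default to False when absent)
print(args.extensions)  # ['gallery', 'api']
print(args.model)       # None  (no default was given)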
server.py (19 changed lines)
@@ -8,6 +8,7 @@ import json
 import math
 import os
 import re
+import sys
 import time
 import traceback
 import zipfile
@@ -453,7 +454,23 @@ else:
 if shared.args.model is not None:
     shared.model_name = shared.args.model
     shared.model, shared.tokenizer = load_model(shared.model_name)
-if shared.args.lora:
+elif shared.args.model_menu:
+    if len(available_models) == 0:
+        print('No models are available! Please download at least one.')
+        sys.exit(0)
+    elif len(available_models) == 1:
+        i = 0
+    else:
+        print('The following models are available:\n')
+        for i, model in enumerate(available_models):
+            print(f'{i+1}. {model}')
+        print(f'\nWhich one do you want to load? 1-{len(available_models)}\n')
+        i = int(input()) - 1
+        print()
+    shared.model_name = available_models[i]
+    shared.model, shared.tokenizer = load_model(shared.model_name)
+
+if shared.args.model is not None and shared.args.lora:
     add_lora_to_model(shared.args.lora)
 
 # Default UI settings
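For illustration only (the model names below are hypothetical), the prints added above produce a terminal exchange along these lines when more than one model is found and the user types 2:

The following models are available:

1. llama-7b
2. opt-1.3b

Which one do you want to load? 1-2

2

After the selection, `available_models[1]` is loaded just as if it had been passed via `--model`; with zero models the script exits, and with exactly one model it is loaded without prompting.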