mirror of https://github.com/oobabooga/text-generation-webui.git

Allow specifying your own profile picture in chat mode

parent 3fa14befc5
commit fc73188ec7
@@ -113,6 +113,8 @@ Then browse to

 `http://localhost:7860/?__theme=dark`

 Optionally, you can use the following command-line flags:

 | Flag | Description |
@@ -121,7 +123,7 @@ Optionally, you can use the following command-line flags:
 | `--model MODEL` | Name of the model to load by default. |
 | `--notebook` | Launch the web UI in notebook mode, where the output is written to the same text box as the input. |
 | `--chat` | Launch the web UI in chat mode. |
-| `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file profile.png or profile.jpg exists in the same folder as server.py, this image will be used as the bot's profile picture. |
+| `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture. |
 | `--cpu` | Use the CPU to generate text. |
 | `--load-in-8bit` | Load the model with 8-bit precision. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU. |
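The filenames in the description match what the code below actually checks for: img_bot.* for the bot and img_me.* for you, resolved relative to server.py. A quick illustrative check that your avatar files will be picked up (this snippet is not part of the commit; run it from the same folder as server.py):

    from pathlib import Path

    # Illustrative only: mirrors the lookup order the UI uses per avatar.
    for role, names in {
        "bot": ["img_bot.png", "img_bot.jpg", "img_bot.jpeg"],
        "you": ["img_me.png", "img_me.jpg", "img_me.jpeg"],
    }.items():
        found = next((n for n in names if Path(n).exists()), None)
        print(f"{role} avatar: {found or 'none found (default used)'}")

The first existing file in each list wins.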
@@ -196,7 +196,7 @@ def generate_chat_html(history, name1, name2, character):
   border-radius: 50%;
 }

-.circle-bot img {
+.circle-bot img, .circle-you img {
   border-radius: 50%;
   width: 100%;
   height: 100%;
@@ -225,15 +225,21 @@ def generate_chat_html(history, name1, name2, character):
             f"characters/{character}.png",
             f"characters/{character}.jpg",
             f"characters/{character}.jpeg",
-            "profile.png",
-            "profile.jpg",
-            "profile.jpeg",
+            "img_bot.png",
+            "img_bot.jpg",
+            "img_bot.jpeg"
             ]:
         if Path(i).exists():
             img = f'<img src="file/{i}">'
             break

+    img_you = ''
+    for i in ["img_me.png", "img_me.jpg", "img_me.jpeg"]:
+        if Path(i).exists():
+            img_you = f'<img src="file/{i}">'
+            break
+
     for i,_row in enumerate(history[::-1]):
         row = _row.copy()
         row[0] = re.sub(r"[\\]*\*", r"*", row[0])
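The bot and user lookups added here are nearly identical. A minimal sketch of how the two loops could be folded into one helper (find_avatar is a hypothetical name, not part of the commit; character is the parameter of generate_chat_html):

    from pathlib import Path

    def find_avatar(candidates):
        # Return an <img> tag for the first existing file, or '' if none match.
        for name in candidates:
            if Path(name).exists():
                return f'<img src="file/{name}">'
        return ''

    img = find_avatar([f"characters/{character}.png",
                       f"characters/{character}.jpg",
                       f"characters/{character}.jpeg",
                       "img_bot.png", "img_bot.jpg", "img_bot.jpeg"])
    img_you = find_avatar(["img_me.png", "img_me.jpg", "img_me.jpeg"])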
@@ -262,6 +268,7 @@ def generate_chat_html(history, name1, name2, character):
             output += f"""
                   <div class="message">
                     <div class="circle-you">
+                      {img_you}
                     </div>
                     <div class="text">
                       <div class="username">
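The hunk above cuts off inside the username div. For orientation, a plausible continuation of the same f-string (the message-body class and the closing tags are assumptions inferred from the bot-side markup, not lines shown in this diff):

    output += f"""
          <div class="message">
            <div class="circle-you">
              {img_you}
            </div>
            <div class="text">
              <div class="username">
                {name1}
              </div>
              <div class="message-body">
                {row[0]}
              </div>
            </div>
          </div>
    """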
@@ -20,7 +20,7 @@ filelock==3.9.0
 fonttools==4.38.0
 frozenlist==1.3.3
 fsspec==2022.11.0
-gradio==3.16.2
+gradio==3.15.0
 h11==0.14.0
 httpcore==0.16.3
 httpx==0.23.1
@@ -22,7 +22,7 @@ parser = argparse.ArgumentParser()
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
 parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
 parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
-parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file profile.png or profile.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture.')
+parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
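For context, argparse derives the attribute name args.cai_chat from '--cai-chat'. A minimal sketch of how these flags are consumed after parsing (the branch bodies are placeholders, not the commit's code):

    args = parser.parse_args()

    if args.cai_chat:
        pass  # Character.AI-style chat UI; picks up img_bot.* and img_me.* avatars
    elif args.chat:
        pass  # plain chat mode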
@@ -80,7 +80,6 @@ def load_model(model_name):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()
-
     # Custom
     else:
         settings = ["low_cpu_mem_usage=True"]
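Both load paths above are unchanged context. Condensed into a standalone sketch, assuming the imports server.py already uses (the load_in_8bit parameter here stands in for the args.load_in_8bit flag):

    import torch
    from pathlib import Path
    from transformers import AutoModelForCausalLM

    def load_model_sketch(model_name, load_in_8bit=False):
        path = Path(f"models/{model_name}")
        if load_in_8bit:
            # 8-bit weights; device_map='auto' spreads layers across GPU(s)/CPU.
            return AutoModelForCausalLM.from_pretrained(path, device_map='auto', load_in_8bit=True)
        # Default path: half precision on a single GPU.
        return AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()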
@@ -186,8 +185,9 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
         t = encode(stopping_string, 0, add_special_tokens=False)
         stopping_criteria_list = transformers.StoppingCriteriaList([
             _SentinelTokenStoppingCriteria(
                 sentinel_token_ids=t,
-                starting_idx=len(input_ids[0]))
+                starting_idx=len(input_ids[0])
+            )
         ])
     else:
         stopping_criteria_list = None
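This hunk only reflows a closing parenthesis, but the helper it touches is easy to miss. A hedged sketch of what a sentinel-token stopping criterion typically looks like (the repo's actual _SentinelTokenStoppingCriteria is defined outside this diff; the reimplementation below is an illustration, not the commit's code):

    import torch
    import transformers

    class SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
        # Stop generation once the sentinel token sequence appears among
        # the tokens produced after starting_idx (the prompt length).
        def __init__(self, sentinel_token_ids: torch.LongTensor, starting_idx: int):
            super().__init__()
            self.sentinel_token_ids = sentinel_token_ids
            self.starting_idx = starting_idx

        def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
            for sample in input_ids:
                trimmed = sample[self.starting_idx:]
                if trimmed.shape[0] < self.sentinel_token_ids.shape[-1]:
                    continue
                # Slide a window over the generated tokens and compare.
                windows = trimmed.unfold(0, self.sentinel_token_ids.shape[-1], 1)
                if (windows == self.sentinel_token_ids).all(dim=-1).any():
                    return True
            return False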
@@ -366,7 +366,6 @@ if args.chat or args.cai_chat:
         load_character(_character, name1, name2)
     else:
         history = []
-
     _history = remove_example_dialogue_from_history(history)
     if args.cai_chat:
         return generate_chat_html(_history, name1, name2, character)