Instruction Character Vicuna, Instruction Mode Bugfix (#838)

Author: OWKenobi, 2023-04-06 22:40:44 +02:00 (committed by GitHub)
parent 20b8ca4482
commit 310bf46a94
2 changed files with 15 additions and 2 deletions

New file: the Vicuna instruction-following character definition

@@ -0,0 +1,3 @@
+name: "### Assistant:"
+your_name: "### Human:"
+context: "Below is an instruction that describes a task. Write a response that appropriately completes the request."
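
These three keys are what an instruction-following character supplies: name and your_name double as the turn markers, and context becomes the preamble. A minimal sketch of how such a file could be loaded and assembled into a Vicuna-style prompt (the path, the build_instruct_prompt helper, and the exact joining format are illustrative assumptions, not the project's actual loading code):

# Hypothetical loader sketch, assuming PyYAML; build_instruct_prompt and
# the joining format are illustrative, not the webui's real implementation.
import yaml

def build_instruct_prompt(character_path, user_message):
    with open(character_path) as f:
        character = yaml.safe_load(f)
    bot_marker = character['name']        # "### Assistant:"
    user_marker = character['your_name']  # "### Human:"
    preamble = character['context']
    return f"{preamble}\n\n{user_marker} {user_message}\n{bot_marker}"

# Expected shape of the result:
#   Below is an instruction that describes a task. ...
#
#   ### Human: Summarize this paragraph.
#   ### Assistant: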

Modified file: the chat module (defines chatbot_wrapper and impersonate_wrapper)

@@ -99,6 +99,11 @@ def extract_message_from_reply(reply, name1, name2, stop_at_newline):
     return reply, next_character_found

 def chatbot_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn, regenerate=False):
+    if mode == 'instruct':
+        stopping_strings = [f"\n{name1}", f"\n{name2}"]
+    else:
+        stopping_strings = [f"\n{name1}:", f"\n{name2}:"]
+
     eos_token = '\n' if generate_state['stop_at_newline'] else None
     name1_original = name1
     if 'pygmalion' in shared.model_name.lower():
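
The point of the new branch: in instruct mode, name1 and name2 come from a character file like the one above, where the markers already end with a colon ("### Human:", "### Assistant:"). The old unconditional f"\n{name1}:" therefore built a double-colon string that never appears in the model output, so generation was not stopped at turn boundaries. A self-contained illustration:

# Why instruct mode drops the appended colon: the marker already has one.
name1 = "### Human:"      # from your_name in the character file above
reply = "Sure!\n### Human: next question"

old_stop = f"\n{name1}:"  # "\n### Human::" -- double colon, never matches
new_stop = f"\n{name1}"   # "\n### Human:"  -- matches the real turn marker

print(old_stop in reply)  # False: the old code would keep generating
print(new_stop in reply)  # True: the fixed code stops at the next turn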
@@ -133,7 +138,7 @@ def chatbot_wrapper(text, generate_state, name1, name2, context, mode, end_of_tu
     just_started = True
     for i in range(generate_state['chat_generation_attempts']):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_state, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_state, eos_token=eos_token, stopping_strings=stopping_strings):
             reply = cumulative_reply + reply

             # Extracting the reply
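
generate_reply itself is not part of this diff; presumably it cuts the streamed text as soon as any entry of stopping_strings appears. A sketch of that behavior under that assumption (apply_stopping_strings is a stand-in, not the webui's actual function):

# Stand-in for the assumed behavior: truncate at the earliest stop string.
def apply_stopping_strings(text, stopping_strings):
    cut = len(text)
    for s in stopping_strings:
        idx = text.find(s)
        if idx != -1:
            cut = min(cut, idx)
    return text[:cut]

print(apply_stopping_strings("Hi!\n### Human: more", ["\n### Human", "\n### Assistant"]))
# -> "Hi!"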
@@ -163,6 +168,11 @@ def chatbot_wrapper(text, generate_state, name1, name2, context, mode, end_of_tu
     yield shared.history['visible']

 def impersonate_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn):
+    if mode == 'instruct':
+        stopping_strings = [f"\n{name1}", f"\n{name2}"]
+    else:
+        stopping_strings = [f"\n{name1}:", f"\n{name2}:"]
+
     eos_token = '\n' if generate_state['stop_at_newline'] else None
     if 'pygmalion' in shared.model_name.lower():
         name1 = "You"
@@ -175,7 +185,7 @@ def impersonate_wrapper(text, generate_state, name1, name2, context, mode, end_o
     cumulative_reply = ''
     for i in range(generate_state['chat_generation_attempts']):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_state, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_state, eos_token=eos_token, stopping_strings=stopping_strings):
             reply = cumulative_reply + reply
             reply, next_character_found = extract_message_from_reply(reply, name1, name2, generate_state['stop_at_newline'])
             yield reply
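
Since the same mode check now appears in both chatbot_wrapper and impersonate_wrapper, the selection logic can be sanity-checked in isolation; the following standalone replication mirrors the branch added in this commit ('chat' stands in for any non-instruct mode):

# Standalone replication of the stopping-string selection added above.
def select_stopping_strings(mode, name1, name2):
    if mode == 'instruct':
        return [f"\n{name1}", f"\n{name2}"]
    return [f"\n{name1}:", f"\n{name2}:"]

assert select_stopping_strings('instruct', '### Human:', '### Assistant:') == ['\n### Human:', '\n### Assistant:']
assert select_stopping_strings('chat', 'You', 'Bot') == ['\nYou:', '\nBot:']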