Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-26 01:30:20 +01:00)
Use a list of eos_tokens instead of just a number
This might be the cause of LLaMA ramblings that some people have experienced.
Commit c7aa51faa6 (parent 17210ff88f)
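For context, here is a minimal sketch of the idea behind this change, assuming a recent Hugging Face transformers release in which `generate()` accepts a list of token IDs for `eos_token_id`. The model name, stop string, and prompt below are illustrative placeholders, not the project's actual code:

# Sketch: build a list of EOS token ids and pass the whole list to generate(),
# so that generation stops at whichever of those ids appears first.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")   # placeholder model
model = AutoModelForCausalLM.from_pretrained("gpt2")

eos_token_ids = [tokenizer.eos_token_id]            # the model's own EOS token
custom_stop = "\n"                                  # optional extra stop string (placeholder)
if custom_stop is not None:
    # use the last token of the encoded stop string as an additional EOS id
    eos_token_ids.append(int(tokenizer.encode(custom_stop)[-1]))

input_ids = tokenizer("The quick brown fox", return_tensors="pt").input_ids
output = model.generate(
    input_ids,
    max_new_tokens=50,
    eos_token_id=eos_token_ids,  # list form: any of these ids ends generation
)
print(tokenizer.decode(output[0]))

With a single integer, only that one token can end generation; with a list, any of the listed IDs can end it, which is what the diff below wires up through `eos_token_ids`.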
@@ -119,7 +119,9 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     original_input_ids = input_ids
     output = input_ids[0]
     cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
-    n = shared.tokenizer.eos_token_id if eos_token is None else int(encode(eos_token)[0][-1])
+    eos_token_ids = [shared.tokenizer.eos_token_id]
+    if eos_token is not None:
+        eos_token_ids.append(int(encode(eos_token)[0][-1]))
     stopping_criteria_list = transformers.StoppingCriteriaList()
     if stopping_string is not None:
         # Copied from https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
@@ -129,7 +131,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     if not shared.args.flexgen:
         generate_params = [
             f"max_new_tokens=max_new_tokens",
-            f"eos_token_id={n}",
+            f"eos_token_id={eos_token_ids}",
             f"stopping_criteria=stopping_criteria_list",
             f"do_sample={do_sample}",
             f"temperature={temperature}",
@@ -149,7 +151,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             f"max_new_tokens={max_new_tokens if shared.args.no_stream else 8}",
             f"do_sample={do_sample}",
             f"temperature={temperature}",
-            f"stop={n}",
+            f"stop={eos_token_ids[-1]}",
         ]
     if shared.args.deepspeed:
         generate_params.append("synced_gpus=True")
@@ -198,7 +200,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             if not (shared.args.chat or shared.args.cai_chat):
                 reply = original_question + apply_extensions(reply[len(question):], "output")

-            if output[-1] == n:
+            if output[-1] in eos_token_ids:
                 break
             yield formatted_outputs(reply, shared.model_name)

@@ -219,7 +221,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             if not (shared.args.chat or shared.args.cai_chat):
                 reply = original_question + apply_extensions(reply[len(question):], "output")

-            if np.count_nonzero(input_ids[0] == n) < np.count_nonzero(output == n):
+            if np.count_nonzero(np.isin(input_ids[0], eos_token_ids)) < np.count_nonzero(np.isin(output, eos_token_ids)):
                 break
             yield formatted_outputs(reply, shared.model_name)

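The last hunk reads as: count how many EOS-like tokens were already present in the prompt and stop only once the output contains more of them than that. Because there are now several candidate ids, the scalar comparison `== n` is replaced with `np.isin`, which marks every position whose id is in the list. A small standalone illustration with made-up token ids (not tied to any real tokenizer):

import numpy as np

eos_token_ids = [2, 13]                      # made-up EOS ids for illustration
prompt_ids = np.array([5, 2, 7, 9])          # the prompt already contains one EOS-like id
output_ids = np.array([5, 2, 7, 9, 21, 13])  # the output now contains a second one

# np.isin flags every element that matches any id in eos_token_ids,
# so the count comparison works for any number of EOS ids.
if np.count_nonzero(np.isin(prompt_ids, eos_token_ids)) < np.count_nonzero(np.isin(output_ids, eos_token_ids)):
    print("a new EOS token was generated -> stop streaming")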