Mirror of https://github.com/oobabooga/text-generation-webui.git
(synced 2024-11-22 16:17:57 +01:00)

Commit: ad2970374a
Parent: 72d539dbff

Readability improvements
@@ -195,8 +195,8 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     for output in eval(f"generate_with_streaming({', '.join(generate_params)})"):
         if shared.soft_prompt:
             output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

         reply = decode(output)

         if not (shared.args.chat or shared.args.cai_chat):
             reply = original_question + apply_extensions(reply[len(question):], "output")

         yield formatted_outputs(reply, shared.model_name)
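Both hunks contain the same soft-prompt splice: when generation runs on inputs_embeds, the returned sequence begins with dummy filler_input_ids rather than readable prompt tokens, so the code drops those positions and prepends the real input_ids[0] before decoding. A minimal sketch with plain torch (the shapes and id values are illustrative assumptions, not the webui's actual data):

import torch

input_ids = torch.tensor([[101, 102, 103]])               # real prompt tokens, batch of 1
filler_input_ids = torch.zeros((1, 5), dtype=torch.long)  # dummy ids covering the embedded input
new_tokens = torch.tensor([104, 105])

# What the model hands back: filler positions first, then the new tokens.
output = torch.cat((filler_input_ids[0], new_tokens))

# Strip the fillers and put the real prompt back in front before decode().
output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
print(output)  # tensor([101, 102, 103, 104, 105])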
@@ -213,16 +213,16 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     output = eval(f"shared.model.generate({', '.join(generate_params)})")[0]
     if shared.soft_prompt:
         output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

     reply = decode(output)

     if not (shared.args.chat or shared.args.cai_chat):
         reply = original_question + apply_extensions(reply[len(question):], "output")

     yield formatted_outputs(reply, shared.model_name)

     if np.count_nonzero(input_ids[0] == n) < np.count_nonzero(output == n):
         break
-    input_ids = np.reshape(output, (1, output.shape[0]))

+    input_ids = np.reshape(output, (1, output.shape[0]))
     if shared.soft_prompt:
         inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
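The np.count_nonzero comparison near the end of the second hunk is the stop condition for this token-by-token loop: generation breaks only when the number of occurrences of the stop token id n has grown relative to the prompt, so stop tokens already present in the input don't end generation early. A runnable sketch with assumed id values:

import numpy as np

n = 50256                                         # assumed stop-token id
input_ids = np.array([[15, 27, 50256, 8]])        # prompt already contains one occurrence
output = np.array([15, 27, 50256, 8, 42, 50256])  # the model just produced another

if np.count_nonzero(input_ids[0] == n) < np.count_nonzero(output == n):
    print("new stop token generated -> break")    # 1 < 2, so the loop stops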
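The line this commit moves, input_ids = np.reshape(output, (1, output.shape[0])), is what feeds the loop's own output back in: output is a flat token sequence, while the next generate call expects a (batch, sequence) array, after which the soft-prompt tensors are rebuilt for the grown input. A one-line sketch of that reshape:

import numpy as np

output = np.array([15, 27, 8, 42])                    # flat sequence after this step
input_ids = np.reshape(output, (1, output.shape[0]))
print(input_ids.shape)                                # (1, 4): batch of one for the next call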