mirror of https://github.com/oobabooga/text-generation-webui.git
Add streaming to RWKV
commit ebd698905c
parent 70e522732c
@@ -87,9 +87,17 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
                              alpha_presence = 0.25, # Presence Penalty (as in GPT-3)
                              token_ban = [0], # ban the generation of some tokens
                              token_stop = []) # stop generation whenever you see any token here

-        reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=None)
-        yield formatted_outputs(reply, None)
-        return formatted_outputs(reply, None)
+        if shared.args.no_stream:
+            reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=None)
+            yield formatted_outputs(reply, None)
+            return formatted_outputs(reply, None)
+        else:
+            for i in range(max_new_tokens//8):
+                reply = question + shared.model.generate(question, token_count=8, args=args, callback=None)
+                yield formatted_outputs(reply, None)
+                question = reply
+            return formatted_outputs(reply, None)

     original_question = question
     if not (shared.args.chat or shared.args.cai_chat):
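What the change does: the old code generated the full RWKV reply in one shot. The new code keeps that behavior behind --no-stream; otherwise it requests 8 tokens at a time, yields the accumulated text after each chunk so the UI can redraw, and feeds the growing reply back in as the next prompt. Note that each chunk re-submits the full accumulated text, so the prompt the model sees grows as the reply does.

A minimal, self-contained sketch of this chunked-streaming pattern (the generate() stub below is a hypothetical stand-in for shared.model.generate; the argument names mirror the diff, but nothing here is the webui's actual API):

# Sketch of the streaming pattern introduced in this commit.
# `generate` is a hypothetical stand-in for shared.model.generate.
def generate(prompt: str, token_count: int) -> str:
    return " tok" * token_count  # pretend model output

def generate_reply(question: str, max_new_tokens: int, no_stream: bool = False):
    if no_stream:
        # Single shot: produce everything, yield the full reply once.
        reply = question + generate(question, token_count=max_new_tokens)
        yield reply
        return
    # Streaming: 8 tokens per chunk, yielding the growing reply each time.
    reply = question
    for _ in range(max_new_tokens // 8):
        reply = reply + generate(reply, token_count=8)
        yield reply

# Usage: a caller such as the Gradio UI redraws on every yield.
for partial in generate_reply("Hello", max_new_tokens=24):
    print(partial)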