From eeafd60713e7bca5c0203d995e95794766b30c60 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 31 Mar 2023 19:05:38 -0300
Subject: [PATCH] Fix streaming

---
 modules/text_generation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/text_generation.py b/modules/text_generation.py
index b7116d9a..6ae592db 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -130,7 +130,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
 
             # RWKV has proper streaming, which is very nice.
             # No need to generate 8 tokens at a time.
-            for reply in shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty):
+            for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty):
                 output = original_question+reply
                 if not (shared.args.chat or shared.args.cai_chat):
                     reply = original_question + apply_extensions(reply, "output")
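
Note (not part of the patch): shared.model.generate() returns the finished
completion in a single blocking call, so the for-loop body ran exactly once
with the full reply and the UI never received partial output.
generate_with_streaming() is a generator that yields the accumulated reply as
tokens arrive, which is the contract the loop expects. A minimal sketch of the
two call shapes, assuming only the behavior implied by the patch; the toy
implementations below are hypothetical stand-ins, not the real RWKV wrapper:

    # generate() returns once; generate_with_streaming() yields repeatedly.

    def generate(context, token_count, **kwargs):
        # Blocking call: the caller only ever sees the finished string.
        return context + " ...full completion..."

    def generate_with_streaming(context, token_count, **kwargs):
        # Generator: yield the accumulated reply after each new token so a
        # `for reply in ...` loop can refresh the UI incrementally.
        reply = context
        for token in [" one", " token", " at", " a", " time"]:  # stand-in for model output
            reply += token
            yield reply

    if __name__ == "__main__":
        for reply in generate_with_streaming(context="Q:", token_count=5):
            print(reply)  # partial output appears immediately, not only at the end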