From 37f0166b2d6b0f2938a5a4c1762479829de1c5be Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sat, 11 Mar 2023 23:14:49 -0300
Subject: [PATCH] Fix memory leak in new streaming (second attempt)

---
 modules/callbacks.py       | 5 ++++-
 modules/text_generation.py | 1 -
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/modules/callbacks.py b/modules/callbacks.py
index 15674b8a..05e8fafa 100644
--- a/modules/callbacks.py
+++ b/modules/callbacks.py
@@ -49,7 +49,7 @@ class Iteratorize:
     def __init__(self, func, kwargs={}, callback=None):
         self.mfunc=func
         self.c_callback=callback
-        self.q = Queue(maxsize=1)
+        self.q = Queue()
         self.sentinel = object()
         self.kwargs = kwargs

@@ -73,3 +73,6 @@ class Iteratorize:
             raise StopIteration
         else:
             return obj
+
+    def __del__(self):
+        pass
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 6a59f9a7..5d01c8cb 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -187,7 +187,6 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         yield formatted_outputs(original_question, shared.model_name)

     for output in eval(f"generate_with_streaming({', '.join(generate_params)})"):
-        print(print('Used vram in gib:', torch.cuda.memory_allocated() / 1024**3))
         if shared.soft_prompt:
             output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
         reply = decode(output)
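
Note on the change: with Queue(maxsize=1), the background thread that feeds tokens into the queue blocks on put() as soon as the consumer stops iterating (for example when generation is interrupted), so the thread and everything it still references are never released. Switching to an unbounded Queue() lets the producer finish and exit on its own. The sketch below is a minimal, self-contained illustration of that Iteratorize pattern, not the repository's exact class; fake_generate and its n parameter are invented for the example, and the comment on __del__ is an assumption about later use, since in this patch it is only a stub.

    # Minimal sketch of the Iteratorize pattern touched by this patch,
    # assuming a callback-based producer. Not the repository's exact code.
    from queue import Queue
    from threading import Thread


    class Iteratorize:
        """Turn func(callback=...) into a lazy iterator over the callback values."""

        def __init__(self, func, kwargs={}, callback=None):
            self.mfunc = func
            self.c_callback = callback
            self.q = Queue()          # unbounded, so the producer's put() never blocks
            self.sentinel = object()  # marks the end of the stream
            self.kwargs = kwargs

            def _callback(val):
                self.q.put(val)

            def gentask():
                ret = self.mfunc(callback=_callback, **self.kwargs)
                self.q.put(self.sentinel)  # tell the consumer we are done
                if self.c_callback:
                    self.c_callback(ret)

            Thread(target=gentask).start()

        def __iter__(self):
            return self

        def __next__(self):
            obj = self.q.get(True, None)
            if obj is self.sentinel:
                raise StopIteration
            return obj

        def __del__(self):
            # A stub in this commit; presumably reserved for signalling the
            # producer thread to stop early (assumption, not part of this patch).
            pass


    # Hypothetical producer standing in for the model's streaming generate call.
    def fake_generate(callback=None, n=3):
        for i in range(n):
            callback(f"token {i}")
        return "done"


    for tok in Iteratorize(fake_generate, {"n": 3}):
        print(tok)

With the earlier Queue(maxsize=1), breaking out of the for loop before the sentinel arrives would leave gentask blocked on q.put() indefinitely; with an unbounded queue it simply drains and exits.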