From c5b40eb5558230fe5894cfd9388809429f5c799f Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 3 Sep 2024 17:37:06 -0700
Subject: [PATCH] llama.cpp: prevent prompt evaluation progress bar with just
 1 step

---
 modules/llama_cpp_python_hijack.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/llama_cpp_python_hijack.py b/modules/llama_cpp_python_hijack.py
index 64280dc9..3d42b2d7 100644
--- a/modules/llama_cpp_python_hijack.py
+++ b/modules/llama_cpp_python_hijack.py
@@ -61,7 +61,7 @@ def eval_with_progress(self, tokens: Sequence[int]):
     assert self._batch.batch is not None
     self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1)

-    if len(tokens) > 1:
+    if len(tokens) > self.n_batch:
         progress_bar = tqdm(range(0, len(tokens), self.n_batch), desc="Prompt evaluation", leave=False)
     else:
         progress_bar = range(0, len(tokens), self.n_batch)
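
For context, here is a minimal runnable sketch of the gating logic this patch introduces. It is not the module's actual code: `eval_with_gated_progress` and its loop body are hypothetical stand-ins, while the `len(tokens) > n_batch` comparison mirrors the hunk above. The old `len(tokens) > 1` test showed a bar for any multi-token prompt, even one processed in a single batch call, so the bar jumped straight from 0% to 100%; comparing against the batch size suppresses it unless evaluation takes at least two steps.

```python
from typing import Sequence

from tqdm import tqdm


def eval_with_gated_progress(tokens: Sequence[int], n_batch: int) -> None:
    # Hypothetical stand-in for the hijacked eval loop, illustrating the patch:
    # only create a tqdm bar when the prompt spans more than one batch, i.e.
    # when there will be more than one step to report.
    if len(tokens) > n_batch:
        progress_bar = tqdm(range(0, len(tokens), n_batch), desc="Prompt evaluation", leave=False)
    else:
        progress_bar = range(0, len(tokens), n_batch)

    for i in progress_bar:
        # Slice out one batch; the real module would decode it via the
        # llama.cpp context here.
        batch = tokens[i:min(len(tokens), i + n_batch)]
        _ = batch


if __name__ == "__main__":
    # 300 tokens with n_batch=512 is a single step: no bar is shown.
    eval_with_gated_progress(list(range(300)), n_batch=512)
    # 2048 tokens is four steps: the bar is shown.
    eval_with_gated_progress(list(range(2048)), n_batch=512)
```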