mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2024-11-24 08:56:52 +01:00
llama.cpp: prevent prompt evaluation progress bar with just 1 step
This commit is contained in:
parent
2cb8d4c96e
commit
c5b40eb555
@@ -61,7 +61,7 @@ def eval_with_progress(self, tokens: Sequence[int]):
|
|||||||
assert self._batch.batch is not None
|
assert self._batch.batch is not None
|
||||||
self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1)
|
self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1)
|
||||||
|
|
||||||
if len(tokens) > 1:
|
if len(tokens) > self.n_batch:
|
||||||
progress_bar = tqdm(range(0, len(tokens), self.n_batch), desc="Prompt evaluation", leave=False)
|
progress_bar = tqdm(range(0, len(tokens), self.n_batch), desc="Prompt evaluation", leave=False)
|
||||||
else:
|
else:
|
||||||
progress_bar = range(0, len(tokens), self.n_batch)
|
progress_bar = range(0, len(tokens), self.n_batch)
|
||||||
|
Loading…
Reference in New Issue
Block a user