Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-22 08:07:56 +01:00
Avoid redundant function call in llamacpp_hf

commit 029da9563f
parent 9b7646140c
modules/llamacpp_hf.py

@@ -131,9 +131,10 @@ class LlamacppHF(PreTrainedModel):
                     longest_prefix = min_length
 
                 if longest_prefix > 0:
-                    self.model.n_tokens = longest_prefix
-                    self.model.eval(seq[longest_prefix:])
                     reset = False
+                    self.model.n_tokens = longest_prefix
+                    if len(seq_tensor) - longest_prefix > 0:
+                        self.model.eval(seq[longest_prefix:])
 
             if reset:
                 self.model.reset()
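For context, below is a minimal sketch of the prefix-reuse logic as it stands after this commit, pieced together from the lines visible in the diff. The FakeModel stand-in, the forward() helper, and the demo token sequences are illustrative assumptions rather than code from the repository; the real code operates on a llama_cpp.Llama instance inside LlamacppHF.__call__.

import torch


class FakeModel:
    """Stand-in for llama_cpp.Llama: tracks n_tokens and counts eval() calls."""

    def __init__(self):
        self.n_tokens = 0
        self.eval_calls = 0

    def eval(self, tokens):
        # A real model would run a forward pass over `tokens` here.
        self.eval_calls += 1
        self.n_tokens += len(tokens)

    def reset(self):
        self.n_tokens = 0


def forward(model, past_seq, seq):
    """Evaluate only the tokens not already covered by the model's cache."""
    seq_tensor = torch.tensor(seq)
    reset = True

    if past_seq is not None:
        # Length of the longest prefix shared by the cached and the new sequence.
        min_length = min(past_seq.shape[0], seq_tensor.shape[0])
        indices = torch.nonzero(~torch.eq(past_seq[:min_length], seq_tensor[:min_length]))
        longest_prefix = indices[0].item() if len(indices) > 0 else min_length

        if longest_prefix > 0:
            reset = False
            model.n_tokens = longest_prefix
            # The guard introduced by this commit: skip eval() when the new
            # sequence adds no tokens beyond the cached prefix.
            if len(seq_tensor) - longest_prefix > 0:
                model.eval(seq[longest_prefix:])

    if reset:
        model.reset()
        model.eval(seq)

    return seq_tensor


model = FakeModel()
past = forward(model, None, [1, 2, 3, 4])        # cold start: eval() over the full prompt
past = forward(model, past, [1, 2, 3, 4, 5, 6])  # shared prefix: eval() over [5, 6] only
past = forward(model, past, [1, 2, 3, 4, 5, 6])  # identical prompt: no eval() at all now
print(model.eval_calls)  # 2 after this commit; 3 before (a redundant eval([]) call)

The third call illustrates why the commit title says "redundant": when the new prompt is fully covered by the cached prefix, the old code still called eval() with an empty slice, while the new guard skips it.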