From 20740ab16ea9c83c3eb45accc4a72cbf14cdafdb Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 28 Jun 2023 18:10:34 -0300
Subject: [PATCH] Revert "Fix exllama_hf gibbersh above 2048 context, and works >5000 context. (#2913)"

This reverts commit 37a16d23a784aea52f2ce9559793dd136e051148.

---
 modules/exllama_hf.py | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/modules/exllama_hf.py b/modules/exllama_hf.py
index d7dada08..9beb2269 100644
--- a/modules/exllama_hf.py
+++ b/modules/exllama_hf.py
@@ -54,15 +54,7 @@ class ExllamaHF(PreTrainedModel):
         cache = kwargs['past_key_values'] if 'past_key_values' in kwargs else None
         if cache is None:
             cache = ExLlamaCache(self.ex_model)
-
-            nseq = seq[:-1]
-            for seqs in [nseq[i : i + 2048] for i in range(0, len(nseq), 2048)]:
-                self.ex_model.forward(
-                    torch.tensor([seqs], dtype=torch.long),
-                    cache,
-                    preprocess_only=True,
-                    lora=self.lora,
-                )
+            self.ex_model.forward(torch.tensor([seq[:-1]], dtype=torch.long), cache, preprocess_only=True, lora=self.lora)
 
         logits = self.ex_model.forward(torch.tensor([seq[-1:]], dtype=torch.long), cache, lora=self.lora).to(kwargs['input_ids'].device)
 
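Note (illustration, not part of the patch): the hunk above drops a chunked cache prefill and restores a single forward pass over the whole prompt. A minimal sketch of the two behaviours, reusing only names that appear in the diff (self.ex_model, cache, seq, self.lora):

    # Reverted approach (the removed lines): prefill the ExLlama cache in 2048-token chunks.
    nseq = seq[:-1]
    for chunk in [nseq[i:i + 2048] for i in range(0, len(nseq), 2048)]:
        self.ex_model.forward(torch.tensor([chunk], dtype=torch.long), cache, preprocess_only=True, lora=self.lora)

    # Restored approach (the added line): prefill the whole prompt in one forward call.
    self.ex_model.forward(torch.tensor([seq[:-1]], dtype=torch.long), cache, preprocess_only=True, lora=self.lora)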