From 37a16d23a784aea52f2ce9559793dd136e051148 Mon Sep 17 00:00:00 2001
From: Panchovix
Date: Wed, 28 Jun 2023 11:36:07 -0400
Subject: [PATCH] Fix exllama_hf gibberish above 2048 context, and make >5000
 context work. (#2913)

---
 modules/exllama_hf.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/modules/exllama_hf.py b/modules/exllama_hf.py
index 9beb2269..d7dada08 100644
--- a/modules/exllama_hf.py
+++ b/modules/exllama_hf.py
@@ -54,7 +54,15 @@ class ExllamaHF(PreTrainedModel):
         cache = kwargs['past_key_values'] if 'past_key_values' in kwargs else None
         if cache is None:
             cache = ExLlamaCache(self.ex_model)
-            self.ex_model.forward(torch.tensor([seq[:-1]], dtype=torch.long), cache, preprocess_only=True, lora=self.lora)
+
+            nseq = seq[:-1]
+            for seqs in [nseq[i : i + 2048] for i in range(0, len(nseq), 2048)]:
+                self.ex_model.forward(
+                    torch.tensor([seqs], dtype=torch.long),
+                    cache,
+                    preprocess_only=True,
+                    lora=self.lora,
+                )
 
         logits = self.ex_model.forward(torch.tensor([seq[-1:]], dtype=torch.long), cache, lora=self.lora).to(kwargs['input_ids'].device)
 
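
Why this fixes the gibberish: prefilling the whole prompt through a single
forward() call appears to corrupt the cache once the prompt exceeds 2048
positions, so the patch walks seq[:-1] in 2048-token chunks, calling
forward() with preprocess_only=True on each chunk so the cache fills
incrementally; only the final token is then forwarded normally to produce
logits. Below is a minimal standalone sketch of the same chunked-prefill
idea, not code from the patch: chunked_prefill is a hypothetical helper
name, and only the forward(input_ids, cache, preprocess_only=..., lora=...)
call shape visible in the diff is assumed.

import torch


def chunked_prefill(model, cache, token_ids, chunk_size=2048, lora=None):
    # Feed a long prompt into the cache in fixed-size chunks so that no
    # single forward pass sees more than `chunk_size` positions (2048 is
    # the value hard-coded in the patch). The cache accumulates state
    # across calls, so after the loop it holds keys/values for the whole
    # prompt.
    for start in range(0, len(token_ids), chunk_size):
        chunk = token_ids[start:start + chunk_size]
        model.forward(
            torch.tensor([chunk], dtype=torch.long),  # shape (1, <= chunk_size)
            cache,
            preprocess_only=True,  # update the cache only; no logits needed yet
            lora=lora,
        )

Usage mirroring the patch would be
chunked_prefill(self.ex_model, cache, seq[:-1], lora=self.lora), followed by
the unchanged single-token forward of seq[-1:] to obtain logits for sampling.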