Mirror of https://github.com/oobabooga/text-generation-webui.git
Make it possible to evaluate exllama perplexity (#3138)
commit 94dfcec237
parent b284f2407d
@@ -29,6 +29,7 @@ class ExllamaHF(PreTrainedModel):
         super().__init__(PretrainedConfig())
         self.ex_config = config
         self.ex_model = ExLlama(self.ex_config)
+        self.ex_cache = ExLlamaCache(self.ex_model)
         self.generation_config = GenerationConfig()
         self.lora = None
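The cache added here is allocated once and then reused: the rewritten __call__ below rewinds it by zeroing current_seq_len instead of constructing a fresh ExLlamaCache on every forward pass. A minimal sketch of that reuse pattern (illustrative only; assumes ExLlama/ExLlamaCache are imported as in this module and ex_model is an initialized ExLlama instance):

# Sketch, not part of the commit: reuse one preallocated cache by
# rewinding its write position instead of reallocating its buffers.
ex_cache = ExLlamaCache(ex_model)   # allocated once, as in __init__ above

def fresh_pass(tokens):
    ex_cache.current_seq_len = 0    # rewind; old entries get overwritten
    return ex_model.forward(tokens, ex_cache)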
@@ -52,11 +53,20 @@ class ExllamaHF(PreTrainedModel):
         labels = kwargs.get('labels', None)
         seq = kwargs['input_ids'][0].tolist()
         cache = kwargs['past_key_values'] if 'past_key_values' in kwargs else None
-        if cache is None:
-            cache = ExLlamaCache(self.ex_model)
-            self.ex_model.forward(torch.tensor([seq[:-1]], dtype=torch.long), cache, preprocess_only=True, lora=self.lora)
 
-        logits = self.ex_model.forward(torch.tensor([seq[-1:]], dtype=torch.long), cache, lora=self.lora).to(kwargs['input_ids'].device)
+        if labels is None:
+            if cache is None:
+                self.ex_cache.current_seq_len = 0
+                cache = self.ex_cache
+                self.ex_model.forward(torch.tensor([seq[:-1]], dtype=torch.long), cache, preprocess_only=True, lora=self.lora)
+
+            logits = self.ex_model.forward(torch.tensor([seq[-1:]], dtype=torch.long), cache, lora=self.lora).to(kwargs['input_ids'].device)
+        else:
+            if cache is None:
+                self.ex_cache.current_seq_len = 0
+                cache = self.ex_cache
+
+            logits = self.ex_model.forward(torch.tensor([seq], dtype=torch.long), cache, last_id_only=False, lora=self.lora)
 
         loss = None
         if labels is not None:
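The branch on labels separates the two use cases: generation keeps the incremental path (only seq[-1:] is forwarded against the warm cache), while evaluation forwards the whole sequence with last_id_only=False so logits exist at every position. Those full-sequence logits feed the shifted cross-entropy that the next hunk's context lines (shift_logits, shift_labels, loss_fct) refer to; a hedged sketch of that computation, matching the usual HF causal-LM pattern rather than this file verbatim:

from torch.nn import CrossEntropyLoss

def causal_lm_loss(logits, labels):
    # logits: (1, seq_len, vocab_size); labels: (1, seq_len)
    shift_logits = logits[..., :-1, :].contiguous()  # predictions for t+1
    shift_labels = labels[..., 1:].contiguous()      # targets from position 1
    loss_fct = CrossEntropyLoss()
    return loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                    shift_labels.view(-1))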
@@ -71,7 +81,7 @@ class ExllamaHF(PreTrainedModel):
             shift_labels = shift_labels.to(shift_logits.device)
             loss = loss_fct(shift_logits, shift_labels)
 
-        return CausalLMOutputWithPast(logits=logits, past_key_values=cache if use_cache else None)
+        return CausalLMOutputWithPast(logits=logits, past_key_values=cache if use_cache else None, loss=loss)
 
     @classmethod
     def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
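With loss now included in the returned CausalLMOutputWithPast, the wrapper can be scored like any other Transformers causal LM: perplexity is the exponential of the mean cross-entropy loss. A usage sketch (hypothetical helper, not part of the commit):

import torch

def perplexity(model, input_ids):
    # Passing labels triggers the new full-sequence evaluation branch;
    # the model shifts logits/labels internally before computing the loss.
    with torch.no_grad():
        out = model(input_ids=input_ids, labels=input_ids)
    return torch.exp(out.loss)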