Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-21 23:57:58 +01:00
Remove MinPLogitsWarper (it's now a transformers built-in)
This commit is contained in:
parent 4f1e96b9e3
commit 8df68b05e9
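With this change, min_p sampling is handled by the implementation that ships with transformers rather than by the local class removed below. A minimal sketch of requesting min_p through that built-in path, assuming a transformers release recent enough to accept min_p in generate(); the model name is used purely for illustration:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Any causal LM works here; "gpt2" is only an example.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        do_sample=True,
        min_p=0.05,          # forwarded to transformers' built-in min_p warper
        max_new_tokens=20,
    )
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))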
@@ -125,36 +125,6 @@ class QuadraticSamplingLogitsWarper(LogitsWarper):
         return transformed_logits
 
 
-class MinPLogitsWarper(LogitsWarper):
-    def __init__(self, min_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
-        if min_p < 0 or min_p > 1.0:
-            raise ValueError(f"`min_p` has to be a float >= 0 and <= 1, but is {min_p}")
-        self.min_p = min_p
-        self.filter_value = filter_value
-        self.min_tokens_to_keep = min_tokens_to_keep
-
-    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
-        # Convert logits to probabilities
-        probs = torch.softmax(scores, dim=-1)
-        # Get the probability of the top token for each sequence in the batch
-        top_probs, _ = probs.max(dim=-1, keepdim=True)
-        # Calculate the actual min_p threshold by scaling min_p with the top token's probability
-        scaled_min_p = self.min_p * top_probs
-        # Create a mask for tokens that have a probability less than the scaled min_p
-        tokens_to_remove = probs < scaled_min_p
-
-        sorted_indices = torch.argsort(scores, descending=True, dim=-1)
-        sorted_indices_to_remove = torch.gather(tokens_to_remove, dim=-1, index=sorted_indices)
-
-        if self.min_tokens_to_keep > 1:
-            # Keep at least min_tokens_to_keep
-            sorted_indices_to_remove[..., : self.min_tokens_to_keep] = False
-
-        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
-        scores = scores.masked_fill(indices_to_remove, self.filter_value)
-        return scores
-
-
 class TailFreeLogitsWarper(LogitsWarper):
     def __init__(self, tfs: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
         tfs = float(tfs)
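The removed __call__ above scales min_p by the probability of the most likely token and masks every token whose probability falls below that threshold. A minimal numeric sketch of that step on toy logits (the values are chosen only for illustration):

    import torch

    scores = torch.tensor([[2.0, 1.0, 0.0, -1.0]])   # toy logits over a 4-token vocabulary
    min_p = 0.2

    probs = torch.softmax(scores, dim=-1)            # ~[0.64, 0.24, 0.09, 0.03]
    top_probs, _ = probs.max(dim=-1, keepdim=True)   # 0.64
    scaled_min_p = min_p * top_probs                 # 0.2 * 0.64 ~= 0.13
    filtered = scores.masked_fill(probs < scaled_min_p, -float("Inf"))
    print(filtered)                                  # the last two tokens are masked to -inf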
@@ -421,14 +391,6 @@ def get_logits_warper_patch(self, generation_config):
             )
         )
 
-    if generation_config.min_p is not None and 0.0 < generation_config.min_p <= 1.0:
-        warpers_to_add.append(
-            MinPLogitsWarper(
-                min_p=generation_config.min_p,
-                min_tokens_to_keep=min_tokens_to_keep
-            )
-        )
-
     if generation_config.dynamic_temperature:
         warpers_to_add.append(
             DynamicTemperatureLogitsWarper(
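The second hunk drops the code that registered the local warper in get_logits_warper_patch: once generation_config.min_p is set, transformers builds the equivalent warper itself, so the patch no longer needs to append one. A hedged check of that built-in, assuming it is exposed under transformers.generation.logits_process as in recent releases:

    from transformers import GenerationConfig
    from transformers.generation.logits_process import MinPLogitsWarper

    # min_p is a first-class generation parameter ...
    config = GenerationConfig(do_sample=True, min_p=0.05)
    print(config.min_p)  # 0.05, picked up automatically by generate()

    # ... and the warper the patch used to add by hand now comes from transformers itself.
    warper = MinPLogitsWarper(min_p=0.05, min_tokens_to_keep=1)
    print(type(warper).__module__)  # transformers.generation.logits_process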