From 2c5a1e67f9c935c2133280e4e4f94c5752a38ea0 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 7 Dec 2023 20:04:52 -0300
Subject: [PATCH] Parameters: change max_new_tokens & repetition_penalty_range
 defaults (#4842)

---
 extensions/openai/completions.py | 2 +-
 extensions/openai/typing.py      | 2 +-
 modules/presets.py               | 2 +-
 modules/shared.py                | 2 +-
 settings-template.yaml           | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/extensions/openai/completions.py b/extensions/openai/completions.py
index 389466ff..273d5334 100644
--- a/extensions/openai/completions.py
+++ b/extensions/openai/completions.py
@@ -236,7 +236,7 @@ def chat_completions_common(body: dict, is_legacy: bool = False, stream=False) -

     max_tokens = generate_params['max_new_tokens']
     if max_tokens in [None, 0]:
-        generate_params['max_new_tokens'] = 200
+        generate_params['max_new_tokens'] = 512
         generate_params['auto_max_new_tokens'] = True

     requested_model = generate_params.pop('model')
diff --git a/extensions/openai/typing.py b/extensions/openai/typing.py
index 5a2d40d5..695b929a 100644
--- a/extensions/openai/typing.py
+++ b/extensions/openai/typing.py
@@ -10,7 +10,7 @@ class GenerationOptions(BaseModel):
     min_p: float = 0
     top_k: int = 0
     repetition_penalty: float = 1
-    repetition_penalty_range: int = 0
+    repetition_penalty_range: int = 1024
     typical_p: float = 1
     tfs: float = 1
     top_a: float = 0
diff --git a/modules/presets.py b/modules/presets.py
index 842992f9..15443627 100644
--- a/modules/presets.py
+++ b/modules/presets.py
@@ -18,7 +18,7 @@ def default_preset():
         'repetition_penalty': 1,
         'presence_penalty': 0,
         'frequency_penalty': 0,
-        'repetition_penalty_range': 0,
+        'repetition_penalty_range': 1024,
         'typical_p': 1,
         'tfs': 1,
         'top_a': 0,
diff --git a/modules/shared.py b/modules/shared.py
index da1aaf2f..680cd8fb 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -36,7 +36,7 @@ settings = {
     'prompt-default': 'QA',
     'prompt-notebook': 'QA',
     'preset': 'simple-1',
-    'max_new_tokens': 200,
+    'max_new_tokens': 512,
     'max_new_tokens_min': 1,
     'max_new_tokens_max': 4096,
     'negative_prompt': '',
diff --git a/settings-template.yaml b/settings-template.yaml
index cb168443..5cd87e05 100644
--- a/settings-template.yaml
+++ b/settings-template.yaml
@@ -6,7 +6,7 @@ chat_style: cai-chat
 prompt-default: QA
 prompt-notebook: QA
 preset: simple-1
-max_new_tokens: 200
+max_new_tokens: 512
 max_new_tokens_min: 1
 max_new_tokens_max: 4096
 seed: -1