Parameters: change max_new_tokens & repetition_penalty_range defaults (#4842)

This commit is contained in:
oobabooga 2023-12-07 20:04:52 -03:00 committed by GitHub
parent e16e5997ef
commit 2c5a1e67f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 5 additions and 5 deletions

View File

@@ -236,7 +236,7 @@ def chat_completions_common(body: dict, is_legacy: bool = False, stream=False) -
max_tokens = generate_params['max_new_tokens']
if max_tokens in [None, 0]:
-        generate_params['max_new_tokens'] = 200
+        generate_params['max_new_tokens'] = 512
generate_params['auto_max_new_tokens'] = True
requested_model = generate_params.pop('model')

View File

@@ -10,7 +10,7 @@ class GenerationOptions(BaseModel):
min_p: float = 0
top_k: int = 0
repetition_penalty: float = 1
-    repetition_penalty_range: int = 0
+    repetition_penalty_range: int = 1024
typical_p: float = 1
tfs: float = 1
top_a: float = 0

View File

@@ -18,7 +18,7 @@ def default_preset():
'repetition_penalty': 1,
'presence_penalty': 0,
'frequency_penalty': 0,
-        'repetition_penalty_range': 0,
+        'repetition_penalty_range': 1024,
'typical_p': 1,
'tfs': 1,
'top_a': 0,

View File

@@ -36,7 +36,7 @@ settings = {
'prompt-default': 'QA',
'prompt-notebook': 'QA',
'preset': 'simple-1',
-    'max_new_tokens': 200,
+    'max_new_tokens': 512,
'max_new_tokens_min': 1,
'max_new_tokens_max': 4096,
'negative_prompt': '',

View File

@@ -6,7 +6,7 @@ chat_style: cai-chat
prompt-default: QA
prompt-notebook: QA
preset: simple-1
-max_new_tokens: 200
+max_new_tokens: 512
max_new_tokens_min: 1
max_new_tokens_max: 4096
seed: -1