diff --git a/modules/shared.py b/modules/shared.py
index 0ca9efb9..ee06b5a7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -42,7 +42,7 @@ settings = {
     'negative_prompt': '',
     'truncation_length': 2048,
     'truncation_length_min': 0,
-    'truncation_length_max': 16384,
+    'truncation_length_max': 32768,
     'custom_stopping_strings': '',
     'auto_max_new_tokens': False,
     'max_tokens_second': 0,
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index a2bb1695..b0f98e0d 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -81,7 +81,7 @@ def create_ui():
                         shared.gradio['quant_type'] = gr.Dropdown(label="quant_type", choices=["nf4", "fp4"], value=shared.args.quant_type)
 
                         shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
-                        shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=16384, step=256, label="n_ctx", value=shared.args.n_ctx)
+                        shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=32768, step=256, label="n_ctx", value=shared.args.n_ctx)
                         shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
                         shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
@@ -91,7 +91,7 @@ def create_ui():
                         shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
                         shared.gradio['autogptq_info'] = gr.Markdown('* ExLlama_HF is recommended over AutoGPTQ for models derived from LLaMA.')
                         shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
-                        shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=16384, step=256, info='Maximum sequence length.', value=shared.args.max_seq_len)
+                        shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=32768, step=256, info='Maximum sequence length.', value=shared.args.max_seq_len)
                         shared.gradio['alpha_value'] = gr.Slider(label='alpha_value', minimum=1, maximum=8, step=0.1, info='Positional embeddings alpha factor for NTK RoPE scaling. Use either this or compress_pos_emb, not both.', value=shared.args.alpha_value)
                         shared.gradio['rope_freq_base'] = gr.Slider(label='rope_freq_base', minimum=0, maximum=1000000, step=1000, info='If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63)', value=shared.args.rope_freq_base)
                         shared.gradio['compress_pos_emb'] = gr.Slider(label='compress_pos_emb', minimum=1, maximum=8, step=1, info='Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.', value=shared.args.compress_pos_emb)
diff --git a/settings-template.yaml b/settings-template.yaml
index 0696f503..12eb38f8 100644
--- a/settings-template.yaml
+++ b/settings-template.yaml
@@ -13,7 +13,7 @@ seed: -1
 negative_prompt: ''
 truncation_length: 2048
 truncation_length_min: 0
-truncation_length_max: 16384
+truncation_length_max: 32768
 custom_stopping_strings: ''
 auto_max_new_tokens: false
 max_tokens_second: 0