mirror of https://github.com/oobabooga/text-generation-webui.git
synced 2024-11-25 09:19:23 +01:00

Merge remote-tracking branch 'refs/remotes/origin/dev' into dev

This commit is contained in: commit 92d0617bce
@@ -5,15 +5,12 @@ instruction_template: |-
   {%- set ns.found = true -%}
   {%- endif -%}
   {%- endfor -%}
   {%- if not ns.found -%}
   {{- '<|im_start|>system\n' + '' + '<|im_end|>\n' -}}
   {%- endif %}
   {%- for message in messages %}
   {%- if message['role'] == 'system' -%}
-  {{- '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' -}}
+  {{- '<|im_start|>system\n' + message['content'].rstrip() + '<|im_end|>\n' -}}
   {%- else -%}
   {%- if message['role'] == 'user' -%}
-  {{-'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'-}}
+  {{-'<|im_start|>user\n' + message['content'].rstrip() + '<|im_end|>\n'-}}
   {%- else -%}
   {{-'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' -}}
   {%- endif -%}
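The functional change in this template is the added .rstrip() on system and user message contents, which trims trailing whitespace before the end-of-turn token. Below is a minimal rendering sketch, assuming only the jinja2 package and a simplified, flattened version of the ChatML template above; the messages and the if/elif structure are illustrative, not the webui's actual code.

from jinja2 import Template

# Simplified ChatML template: same tokens as above, with .rstrip() on system/user content.
source = (
    r"{%- for message in messages %}"
    r"{%- if message['role'] == 'system' -%}"
    r"{{- '<|im_start|>system\n' + message['content'].rstrip() + '<|im_end|>\n' -}}"
    r"{%- elif message['role'] == 'user' -%}"
    r"{{- '<|im_start|>user\n' + message['content'].rstrip() + '<|im_end|>\n' -}}"
    r"{%- else -%}"
    r"{{- '<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' -}}"
    r"{%- endif -%}"
    r"{%- endfor -%}"
)

messages = [
    {'role': 'system', 'content': 'You are a helpful assistant.\n\n'},  # trailing newlines
    {'role': 'user', 'content': 'Hello!   '},                           # trailing spaces
]

# With .rstrip(), trailing whitespace inside each message is dropped before
# '<|im_end|>' is appended, so no stray blank lines precede the end-of-turn token.
print(Template(source).render(messages=messages))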
@@ -216,7 +216,8 @@ class LlamacppHF(PreTrainedModel):
     'tensor_split': tensor_split_list,
     'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
     'logits_all': shared.args.logits_all,
-    'offload_kqv': not shared.args.no_offload_kqv
+    'offload_kqv': not shared.args.no_offload_kqv,
+    'split_mode': 1 if not shared.args.row_split else 2
 }

 Llama = llama_cpp_lib().Llama
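The new 'split_mode' entry translates the --row_split flag into llama.cpp's multi-GPU split modes; going by the help text added in this commit, 1 means split by layer and 2 means split by row. A standalone sketch of that mapping, with a SimpleNamespace standing in for the webui's shared.args:

from types import SimpleNamespace

# Stand-in for the webui's shared.args; only the fields used here are included.
args = SimpleNamespace(row_split=True, no_offload_kqv=False, logits_all=False)

params = {
    'logits_all': args.logits_all,
    # positive form of the --no_offload_kqv flag
    'offload_kqv': not args.no_offload_kqv,
    # 1 = split by layer across GPUs, 2 = split by row (--row_split)
    'split_mode': 1 if not args.row_split else 2,
}
print(params)  # {'logits_all': False, 'offload_kqv': True, 'split_mode': 2}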
@@ -95,7 +95,8 @@ class LlamaCppModel:
     'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
     'tensor_split': tensor_split_list,
     'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
-    'offload_kqv': not shared.args.no_offload_kqv
+    'offload_kqv': not shared.args.no_offload_kqv,
+    'split_mode': 1 if not shared.args.row_split else 2
 }

 result.model = Llama(**params)
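As in the LlamacppHF loader, the finished params dict is unpacked straight into the llama-cpp-python constructor. A hypothetical usage sketch follows, with a placeholder model path and values, assuming a llama-cpp-python build new enough to accept the offload_kqv and split_mode keyword arguments:

from llama_cpp import Llama

params = {
    'model_path': 'models/example-7b.Q4_K_M.gguf',  # placeholder path
    'n_ctx': 2048,
    'n_gpu_layers': 35,
    'tensor_split': [0.5, 0.5],  # share of the model per GPU
    'offload_kqv': True,         # keep the K, Q, V cache on the GPU
    'split_mode': 2,             # 2 = split by row, i.e. what --row_split enables
}
model = Llama(**params)
print(model('Q: What is 2 + 2? A:', max_tokens=8)['choices'][0]['text'])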
@@ -44,6 +44,7 @@ loaders_and_params = OrderedDict({
     'cpu',
     'numa',
     'no_offload_kqv',
+    'row_split',
     'tensorcores',
 ],
 'llamacpp_HF': [
@@ -66,6 +67,7 @@ loaders_and_params = OrderedDict({
     'no_use_fast',
     'logits_all',
     'no_offload_kqv',
+    'row_split',
     'tensorcores',
     'llamacpp_HF_info',
 ],
@@ -129,6 +129,7 @@ group.add_argument('--numa', action='store_true', help='Activate NUMA task alloc
 group.add_argument('--logits_all', action='store_true', help='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.')
 group.add_argument('--no_offload_kqv', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
 group.add_argument('--cache-capacity', type=str, help='Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
+group.add_argument('--row_split', action='store_true', help='Split multi-gpu by row instead of layer. Faster on some cards.')

 # ExLlamaV2
 group = parser.add_argument_group('ExLlamaV2')
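The new --row_split flag is a plain store_true argument whose value later selects split_mode as shown above. A minimal standalone sketch of the flag (illustrative group name, not the webui's full parser):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('llama.cpp')  # illustrative group name
group.add_argument('--row_split', action='store_true',
                   help='Split multi-gpu by row instead of layer. Faster on some cards.')

args = parser.parse_args(['--row_split'])
print(args.row_split)                  # True
print(1 if not args.row_split else 2)  # 2, the value passed on as split_mode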
@@ -93,6 +93,7 @@ def list_model_elements():
     'numa',
     'logits_all',
     'no_offload_kqv',
+    'row_split',
     'tensorcores',
     'hqq_backend',
 ]
@@ -90,7 +90,7 @@ def create_ui():
     shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=shared.settings['truncation_length_max'], step=256, label="n_ctx", value=shared.args.n_ctx, info='Context length. Try lowering this if you run out of memory while loading the model.')
     shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
     shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=32, value=shared.args.threads_batch)
-    shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
+    shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, step=1, value=shared.args.n_batch)

     shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
     shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
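The n_batch change only adds step=1: when no step is given, gradio derives a default step from the slider's range, which for a 1-2048 slider appears to be coarser than 1, so the control could not be set to arbitrary integer batch sizes. A small sketch outside the webui (assumes the gradio package; the default value is a placeholder):

import gradio as gr

n_batch_default = 512  # placeholder

with gr.Blocks() as demo:
    # step=1 lets the user pick any integer batch size instead of the
    # coarser auto-derived step.
    n_batch = gr.Slider(label="n_batch", minimum=1, maximum=2048, step=1,
                        value=n_batch_default)

# demo.launch()  # uncomment to actually serve the UI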
@@ -107,6 +107,7 @@ def create_ui():
     with gr.Column():
         shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.')
         shared.gradio['no_offload_kqv'] = gr.Checkbox(label="no_offload_kqv", value=shared.args.no_offload_kqv, info='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
+        shared.gradio['row_split'] = gr.Checkbox(label="row_split", value=shared.args.row_split, info='Split model by rows across GPUs. Improves performance on some cards.')
         shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
         shared.gradio['no_inject_fused_attention'] = gr.Checkbox(label="no_inject_fused_attention", value=shared.args.no_inject_fused_attention, info='Disable fused attention. Fused attention improves inference performance but uses more VRAM. Fuses layers for AutoAWQ. Disable if running low on VRAM.')
         shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')