Mirror of https://github.com/oobabooga/text-generation-webui.git
Add llama-2-70b GGML support (#3285)
commit a07d070b6c
parent 6f4830b4d3
@@ -247,6 +247,8 @@ Optionally, you can use the following command-line flags:
 | `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |
 | `--n_ctx N_CTX` | Size of the prompt context. |
 | `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
+| `--n_gqa N_GQA` | Grouped-query attention. Must be 8 for Llama-2 70B. |
+| `--rms_norm_eps RMS_NORM_EPS` | Must be 1e-5 for Llama-2 70B. |
 
 #### AutoGPTQ
 
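Taken together, these are the flags that let a Llama-2 70B GGML model load correctly, e.g. `python server.py --model llama-2-70b.ggmlv3.q4_K_M.bin --n_gqa 8 --rms_norm_eps 1e-5 --n-gpu-layers 40` (a hypothetical invocation; the model filename and GPU layer count are placeholders for your own setup).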
@@ -106,6 +106,8 @@ class LlamacppHF(PreTrainedModel):
     'n_gpu_layers': shared.args.n_gpu_layers,
     'rope_freq_base': 10000 * shared.args.alpha_value ** (64/63.),
     'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
+    'n_gqa': shared.args.n_gqa or None,
+    'rms_norm_eps': shared.args.rms_norm_eps or None,
     'logits_all': True,
 }
 
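The new values are passed through with an `or None` guard. A minimal standalone sketch of why that works, assuming the argparse defaults of 0 added later in this diff:

```python
# When --n_gqa / --rms_norm_eps are not passed, argparse leaves them at 0,
# and `0 or None` evaluates to None, so llama-cpp-python falls back to its
# own defaults instead of receiving a bogus zero.
n_gqa = 0           # shared.args.n_gqa with the flag unset
rms_norm_eps = 0.0  # shared.args.rms_norm_eps with the flag unset

params = {
    'n_gqa': n_gqa or None,                # -> None
    'rms_norm_eps': rms_norm_eps or None,  # -> None
}
print(params)  # {'n_gqa': None, 'rms_norm_eps': None}
```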
@@ -53,6 +53,8 @@ class LlamaCppModel:
     'n_gpu_layers': shared.args.n_gpu_layers,
     'rope_freq_base': 10000 * shared.args.alpha_value ** (64/63.),
     'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
+    'n_gqa': shared.args.n_gqa or None,
+    'rms_norm_eps': shared.args.rms_norm_eps or None,
 }
 
 result.model = Llama(**params)
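For reference, a minimal sketch of what the params above amount to when a 70B GGML file is handed to llama-cpp-python 0.1.77 directly (the model path and values are placeholders, not the webui's actual code):

```python
from llama_cpp import Llama

# Placeholder path; n_gqa=8 and rms_norm_eps=1e-5 are the values the
# Llama-2 70B GGML architecture requires.
model = Llama(
    model_path='models/llama-2-70b.ggmlv3.q4_K_M.bin',
    n_ctx=2048,
    n_gpu_layers=0,
    n_gqa=8,
    rms_norm_eps=1e-5,
    seed=0,
)
print(model.create_completion('Hello', max_tokens=8)['choices'][0]['text'])
```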
@@ -30,6 +30,8 @@ loaders_and_params = {
     ],
     'llama.cpp': [
         'n_ctx',
+        'n_gqa',
+        'rms_norm_eps',
         'n_gpu_layers',
         'n_batch',
         'threads',
@@ -42,6 +44,8 @@ loaders_and_params = {
     ],
     'llamacpp_HF': [
         'n_ctx',
+        'n_gqa',
+        'rms_norm_eps',
         'n_gpu_layers',
         'n_batch',
         'threads',
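Registering the two fields here adds them to the per-loader UI element lists. An illustrative sketch of that mapping (not the webui's actual visibility code):

```python
# Map each loader to the UI elements that matter for it; a loader change can
# then show exactly these widgets and hide everything else.
loaders_and_params = {
    'llama.cpp':   ['n_ctx', 'n_gqa', 'rms_norm_eps', 'n_gpu_layers', 'n_batch', 'threads'],
    'llamacpp_HF': ['n_ctx', 'n_gqa', 'rms_norm_eps', 'n_gpu_layers', 'n_batch', 'threads'],
}

all_elements = ['n_ctx', 'n_gqa', 'rms_norm_eps', 'n_gpu_layers', 'n_batch', 'threads', 'wbits', 'groupsize']

def visibility(loader: str) -> dict:
    """Return {element_name: should_be_visible} for the chosen loader."""
    wanted = set(loaders_and_params.get(loader, []))
    return {name: name in wanted for name in all_elements}

print(visibility('llama.cpp'))  # n_gqa/rms_norm_eps True, wbits/groupsize False
```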
@@ -127,6 +127,8 @@ parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity.
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
+parser.add_argument('--n_gqa', type=int, default=0, help='Grouped-query attention. Must be 8 for Llama-2 70B.')
+parser.add_argument('--rms_norm_eps', type=float, default=0, help='Must be 1e-5 for Llama-2 70B.')
 
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
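The two flags in isolation, as a quick self-contained check of the defaults and types (plain argparse, mirroring the added lines above):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--n_gqa', type=int, default=0, help='Grouped-query attention. Must be 8 for Llama-2 70B.')
parser.add_argument('--rms_norm_eps', type=float, default=0, help='Must be 1e-5 for Llama-2 70B.')

args = parser.parse_args(['--n_gqa', '8', '--rms_norm_eps', '1e-5'])
print(args.n_gqa, args.rms_norm_eps)  # 8 1e-05
```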
@@ -61,6 +61,8 @@ def list_model_elements():
         'mlock',
         'n_gpu_layers',
         'n_ctx',
+        'n_gqa',
+        'rms_norm_eps',
         'llama_cpp_seed',
         'gpu_split',
         'max_seq_len',
@@ -27,8 +27,8 @@ https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.3.0/auto_gptq-0.3.0+cu
 https://github.com/jllllll/exllama/releases/download/0.0.8/exllama-0.0.8+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 https://github.com/jllllll/exllama/releases/download/0.0.8/exllama-0.0.8+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
 # llama-cpp-python without GPU support
-llama-cpp-python==0.1.74; platform_system != "Windows"
-https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.74/llama_cpp_python-0.1.74-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+llama-cpp-python==0.1.77; platform_system != "Windows"
+https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.77/llama_cpp_python-0.1.77-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 # llama-cpp-python with CUDA support
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.74+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.74+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.77+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.77+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
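For an existing install, the bump can be applied by hand with `pip install llama-cpp-python==0.1.77` for the CPU build, or by installing the matching 0.1.77 cuBLAS wheel from the URLs above; which wheel applies depends on your platform, so treat these commands as a hint rather than a recipe.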
@@ -219,6 +219,8 @@ def create_model_menus():
 shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
 shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
 shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=16384, step=256, label="n_ctx", value=shared.args.n_ctx)
+shared.gradio['n_gqa'] = gr.Slider(minimum=0, maximum=16, step=1, label="n_gqa", value=shared.args.n_gqa, info='Grouped-query attention. Must be 8 for Llama-2 70B.')
+shared.gradio['rms_norm_eps'] = gr.Slider(minimum=0, maximum=1e-5, step=1e-6, label="rms_norm_eps", value=shared.args.rms_norm_eps, info='Must be 1e-5 for Llama-2 70B.')
 shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=str(shared.args.wbits) if shared.args.wbits > 0 else "None")
 shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=str(shared.args.groupsize) if shared.args.groupsize > 0 else "None")
 shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None", "llama", "opt", "gptj"], value=shared.args.model_type or "None")
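A standalone sketch of the two new sliders with the same ranges, runnable outside the webui to see how the `info` hints render (illustrative only):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Slider(minimum=0, maximum=16, step=1, label="n_gqa", value=0,
              info='Grouped-query attention. Must be 8 for Llama-2 70B.')
    gr.Slider(minimum=0, maximum=1e-5, step=1e-6, label="rms_norm_eps", value=0,
              info='Must be 1e-5 for Llama-2 70B.')

demo.launch()
```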