mirror of https://github.com/oobabooga/text-generation-webui.git

Add llama.cpp GPU offload option (#2060)

parent eee986348c
commit 071f0776ad
@@ -230,6 +230,7 @@ Optionally, you can use the following command-line flags:
 | `--n_batch` | Maximum number of prompt tokens to batch together when calling llama_eval. |
 | `--no-mmap` | Prevent mmap from being used. |
 | `--mlock` | Force the system to keep the model in RAM. |
+| `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |

 #### GPTQ

@@ -1,16 +1,31 @@
-## Using llama.cpp in the web UI
+# Using llama.cpp in the web UI

-#### Pre-converted models
+## Setting up the models
+
+#### Pre-converted

 Place the model in the `models` folder, making sure that its name contains `ggml` somewhere and ends in `.bin`.

 #### Convert LLaMA yourself

-Follow the instructions in the llama.cpp README to generate the `ggml-model-q4_0.bin` file: https://github.com/ggerganov/llama.cpp#usage
+Follow the instructions in the llama.cpp README to generate the `ggml-model.bin` file: https://github.com/ggerganov/llama.cpp#usage
+
+## GPU offloading
+
+Enabled with the `--n-gpu-layers` parameter. If you have enough VRAM, use a high number like `--n-gpu-layers 200000` to offload all layers to the GPU.
+
+Note that you need to manually install `llama-cpp-python` with GPU support. To do that:
+
+```
+pip uninstall -y llama-cpp-python
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
+```
+
+Here you can find the different compilation options for OpenBLAS / cuBLAS / CLBlast: https://pypi.org/project/llama-cpp-python/

 ## Performance

-This was the performance of llama-7b int4 on my i5-12400F:
+This was the performance of llama-7b int4 on my i5-12400F (cpu only):

 > Output generated in 33.07 seconds (6.05 tokens/s, 200 tokens, context 17)

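Reviewer note: a quick way to confirm that the rebuilt `llama-cpp-python` wheel really offloads is to call the library directly, outside the web UI. A minimal sketch, assuming a GGML model at `models/ggml-model-q4_0.bin` (the path and layer count are placeholders, not part of this commit):

```python
# Minimal offload check with a llama-cpp-python wheel built with -DLLAMA_CUBLAS=on.
# The model path and layer count below are placeholders; adjust them to your setup.
from llama_cpp import Llama

llm = Llama(
    model_path="models/ggml-model-q4_0.bin",  # placeholder GGML model
    n_gpu_layers=32,                          # 0 keeps everything on the CPU
)
out = llm("Hello, my name is", max_tokens=16)
print(out["choices"][0]["text"])
```

If VRAM usage does not change while the model loads, the wheel was most likely built without GPU support and needs to be reinstalled as shown above.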
@@ -27,7 +27,8 @@ class LlamaCppModel:
             'n_threads': shared.args.threads or None,
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
-            'use_mlock': shared.args.mlock
+            'use_mlock': shared.args.mlock,
+            'n_gpu_layers': shared.args.n_gpu_layers
         }
         self.model = Llama(**params)
         self.model.set_cache(LlamaCache)

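For readers unfamiliar with the loader, the `params` dictionary above is simply unpacked into the `Llama` constructor. A standalone sketch of the same pattern, with hard-coded stand-ins for the `shared.args` flags (the values and the model path are illustrative, not taken from the commit):

```python
from llama_cpp import Llama

# Hard-coded stand-ins for the command-line flags parsed in shared.py.
params = {
    'model_path': 'models/ggml-model-q4_0.bin',  # supplied elsewhere in the real loader
    'n_threads': 8,              # --threads (the loader passes None when the flag is 0)
    'n_batch': 512,              # --n_batch
    'use_mmap': True,            # the inverse of --no-mmap
    'use_mlock': False,          # --mlock
    'n_gpu_layers': 1000000000,  # --n-gpu-layers; a huge value offloads every layer
}
model = Llama(**params)
```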
@@ -123,6 +123,7 @@ parser.add_argument('--threads', type=int, default=0, help='Number of threads to
 parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
 parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
+parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')

 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')

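One small detail: argparse converts the dashes in `--n-gpu-layers` into underscores, which is why the loader reads the value as `shared.args.n_gpu_layers`. A throwaway parser (not the project's real `shared.py`) showing the mapping:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')

# Dashes in the option string become underscores in the attribute name.
args = parser.parse_args(['--n-gpu-layers', '32'])
print(args.n_gpu_layers)  # -> 32
```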