Mirror of https://github.com/oobabooga/text-generation-webui.git
--no-cache and --gpu-memory in MiB for fine VRAM control
This commit is contained in: parent 4bafe45a51, commit ddb62470e9
@@ -183,7 +183,8 @@ Optionally, you can use the following command-line flags:
 | `--disk-cache-dir DISK_CACHE_DIR` | Directory to save the disk cache to. Defaults to `cache/`. |
 | `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maxmimum GPU memory in GiB to be allocated per GPU. Example: `--gpu-memory 10` for a single GPU, `--gpu-memory 10 5` for two GPUs. |
 | `--cpu-memory CPU_MEMORY` | Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.|
+| `--no-cache` | Set `use_cache` to False while generating text. This reduces the VRAM usage a bit at a performance cost. |
 | `--flexgen` | Enable the use of FlexGen offloading. |
 | `--percent PERCENT [PERCENT ...]` | FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0). |
 | `--compress-weight` | FlexGen: Whether to compress weight (default: False).|
 | `--pin-weight [PIN_WEIGHT]` | FlexGen: whether to pin weights (setting this to False reduces CPU memory by 20%). |
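With this row in place, per-GPU limits no longer have to be whole GiB values: something like `--gpu-memory 3500MiB` (or `--gpu-memory 3500MiB 8GiB` across two GPUs) is accepted, and `--no-cache` trades some generation speed for a further VRAM saving. The specific values here are illustrative, not taken from the commit.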
@@ -1,5 +1,6 @@
 import json
 import os
+import re
 import time
 import zipfile
 from pathlib import Path
@@ -120,11 +121,12 @@ def load_model(model_name):
         params["torch_dtype"] = torch.float16

     if shared.args.gpu_memory:
-        memory_map = shared.args.gpu_memory
+        memory_map = list(map(lambda x : x.strip(), shared.args.gpu_memory))
+        max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
         max_memory = {}
         for i in range(len(memory_map)):
-            max_memory[i] = f'{memory_map[i]}GiB'
-        max_memory['cpu'] = f'{shared.args.cpu_memory or 99}GiB'
+            max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
+        max_memory['cpu'] = max_cpu_memory
         params['max_memory'] = max_memory
     elif shared.args.auto_devices:
         total_mem = (torch.cuda.get_device_properties(0).total_memory / (1024*1024))
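To make the new unit handling concrete, here is a small standalone sketch of the same rule (the helper name and the sample values are illustrative, not from the commit): entries that already end in "ib" (MiB, GiB) go into the max_memory map untouched, while bare numbers keep the old behaviour of being read as GiB.

    import re

    def build_max_memory(gpu_memory, cpu_memory=None):
        # Same rule as the diff above: keep explicit units, default bare numbers to GiB.
        memory_map = [x.strip() for x in gpu_memory]
        max_memory = {}
        for i, entry in enumerate(memory_map):
            max_memory[i] = entry if re.match('.*ib$', entry.lower()) else f'{entry}GiB'
        max_memory['cpu'] = cpu_memory.strip() if cpu_memory is not None else '99GiB'
        return max_memory

    # 3500 MiB on GPU 0, 10 GiB on GPU 1, CPU limit left at its default
    print(build_max_memory(['3500MiB', '10']))
    # -> {0: '3500MiB', 1: '10GiB', 'cpu': '99GiB'}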
@@ -85,8 +85,9 @@ parser.add_argument('--bf16', action='store_true', help='Load the model with bfl
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
 parser.add_argument('--disk-cache-dir', type=str, default="cache", help='Directory to save the disk cache to. Defaults to "cache".')
-parser.add_argument('--gpu-memory', type=int, nargs="+", help='Maxmimum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs.')
-parser.add_argument('--cpu-memory', type=int, help='Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.')
+parser.add_argument('--gpu-memory', type=str, nargs="+", help='Maxmimum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs.')
+parser.add_argument('--cpu-memory', type=str, help='Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.')
+parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
 parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
 parser.add_argument('--percent', type=int, nargs="+", default=[0, 100, 100, 0, 100, 0], help='FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0).')
 parser.add_argument("--compress-weight", action="store_true", help="FlexGen: activate weight compression.")
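Since `--gpu-memory` and `--cpu-memory` are now plain strings as far as argparse is concerned, values with explicit units survive parsing unchanged and the unit handling happens later in load_model. A quick standalone check with made-up values:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu-memory', type=str, nargs="+")
    parser.add_argument('--cpu-memory', type=str)
    parser.add_argument('--no-cache', action='store_true')

    args = parser.parse_args(['--gpu-memory', '3500MiB', '10', '--cpu-memory', '30GiB', '--no-cache'])
    print(args.gpu_memory)  # ['3500MiB', '10']
    print(args.cpu_memory)  # 30GiB
    print(args.no_cache)    # True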
@@ -136,7 +136,9 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         t = encode(stopping_string, 0, add_special_tokens=False)
         stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=t, starting_idx=len(input_ids[0])))

-    generate_params = {}
+    generate_params = {
+        'use_cache': not shared.args.no_cache,
+    }
     if not shared.args.flexgen:
         generate_params.update({
             "max_new_tokens": max_new_tokens,
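For context on the trade-off (a gloss, not code from the commit): with use_cache set to False, transformers does not keep the attention key/value cache between generation steps, so each new token re-runs the forward pass over the full sequence; the cache memory is saved and decoding gets slower. A minimal illustration of how the flag ends up in the generation kwargs, with a stand-in for shared.args:

    from types import SimpleNamespace

    # Stand-in for shared.args after parsing; assume --no-cache was passed.
    args = SimpleNamespace(no_cache=True, flexgen=False)

    generate_params = {
        'use_cache': not args.no_cache,  # False: KV cache disabled, less VRAM, slower decoding
    }
    if not args.flexgen:
        generate_params.update({'max_new_tokens': 200})

    print(generate_params)  # {'use_cache': False, 'max_new_tokens': 200}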