diff --git a/README.md b/README.md
index fd5b1223..8b4885a4 100644
--- a/README.md
+++ b/README.md
@@ -242,6 +242,8 @@ Optionally, you can use the following command-line flags:
 | `--mlock` | Force the system to keep the model in RAM. |
 | `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |
 | `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |
+| `--n_ctx N_CTX` | Size of the prompt context. |
+| `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
 
 #### GPTQ
 
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 2d351b43..ea42dafb 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -39,8 +39,8 @@ class LlamaCppModel:
 
         params = {
             'model_path': str(path),
-            'n_ctx': 2048,
-            'seed': 0,
+            'n_ctx': shared.args.n_ctx,
+            'seed': int(shared.args.llama_cpp_seed),
             'n_threads': shared.args.threads or None,
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
diff --git a/modules/shared.py b/modules/shared.py
index 11a4d11e..c338d64f 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -134,6 +134,8 @@ parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from be
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
+parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
+parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
 
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
diff --git a/modules/ui.py b/modules/ui.py
index 22dba2ad..f1058821 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -30,7 +30,7 @@ theme = gr.themes.Default(
 
 
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
 
diff --git a/server.py b/server.py
index 229af41f..bfc72e9a 100644
--- a/server.py
+++ b/server.py
@@ -404,10 +404,12 @@ def create_model_menus():
                         shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
                         shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
                         shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
+                        shared.gradio['n_ctx'] = gr.Slider(0, 8192, label="n_ctx", value=shared.args.n_ctx)
 
                     with gr.Column():
                         shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
                         shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
+                        shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
 
                     with gr.Row():
                         shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
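For reviewers, the sketch below shows (outside the diff) how the two new flags are intended to reach llama-cpp-python: `--n_ctx` becomes the `n_ctx` constructor argument and `--llama_cpp_seed` becomes `seed`, mirroring the `params` dict built in `modules/llamacpp_model.py`. The standalone argparse parser and the `models/ggml-model.bin` path are illustrative assumptions, not code from this PR.

```python
# Minimal sketch of the flag-to-model flow added by this diff (illustrative only).
# Assumes llama-cpp-python's Llama constructor accepts the same keyword
# arguments that the params dict in modules/llamacpp_model.py passes to it.
import argparse

from llama_cpp import Llama

parser = argparse.ArgumentParser()
parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
args = parser.parse_args()

model = Llama(
    model_path='models/ggml-model.bin',  # hypothetical model path
    n_ctx=args.n_ctx,                    # prompt context size, default 2048
    seed=int(args.llama_cpp_seed),       # 0 means "random" per the flag's help text
)
```

Because the Gradio slider and seed field are initialized from `shared.args.n_ctx` and `shared.args.llama_cpp_seed`, values passed on the command line (e.g. `python server.py --n_ctx 4096 --llama_cpp_seed 42`) also pre-fill the corresponding controls in the model menu.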