Mirror of https://github.com/oobabooga/text-generation-webui.git
Synced 2024-11-25 17:29:22 +01:00

Commit d1115f18b9

README.md (44 lines changed)
@@ -204,16 +204,16 @@ List of command-line flags
 usage: server.py [-h] [--multi-user] [--character CHARACTER] [--model MODEL] [--lora LORA [LORA ...]] [--model-dir MODEL_DIR] [--lora-dir LORA_DIR] [--model-menu] [--settings SETTINGS]
 [--extensions EXTENSIONS [EXTENSIONS ...]] [--verbose] [--chat-buttons] [--idle-timeout IDLE_TIMEOUT] [--loader LOADER] [--cpu] [--auto-devices]
 [--gpu-memory GPU_MEMORY [GPU_MEMORY ...]] [--cpu-memory CPU_MEMORY] [--disk] [--disk-cache-dir DISK_CACHE_DIR] [--load-in-8bit] [--bf16] [--no-cache] [--trust-remote-code]
-[--force-safetensors] [--no_use_fast] [--use_flash_attention_2] [--load-in-4bit] [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE] [--flash-attn]
-[--tensorcores] [--n_ctx N_CTX] [--threads THREADS] [--threads-batch THREADS_BATCH] [--no_mul_mat_q] [--n_batch N_BATCH] [--no-mmap] [--mlock] [--n-gpu-layers N_GPU_LAYERS]
-[--tensor_split TENSOR_SPLIT] [--numa] [--logits_all] [--no_offload_kqv] [--cache-capacity CACHE_CAPACITY] [--row_split] [--streaming-llm] [--attention-sink-size ATTENTION_SINK_SIZE]
-[--gpu-split GPU_SPLIT] [--autosplit] [--max_seq_len MAX_SEQ_LEN] [--cfg-cache] [--no_flash_attn] [--cache_8bit] [--cache_4bit] [--num_experts_per_token NUM_EXPERTS_PER_TOKEN]
-[--triton] [--no_inject_fused_mlp] [--no_use_cuda_fp16] [--desc_act] [--disable_exllama] [--disable_exllamav2] [--wbits WBITS] [--groupsize GROUPSIZE] [--no_inject_fused_attention]
-[--hqq-backend HQQ_BACKEND] [--deepspeed] [--nvme-offload-dir NVME_OFFLOAD_DIR] [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE]
-[--compress_pos_emb COMPRESS_POS_EMB] [--listen] [--listen-port LISTEN_PORT] [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH]
-[--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE] [--api] [--public-api] [--public-api-id PUBLIC_API_ID] [--api-port API_PORT]
-[--api-key API_KEY] [--admin-key ADMIN_KEY] [--nowebui] [--multimodal-pipeline MULTIMODAL_PIPELINE] [--model_type MODEL_TYPE] [--pre_layer PRE_LAYER [PRE_LAYER ...]]
-[--checkpoint CHECKPOINT] [--monkey-patch]
+[--force-safetensors] [--no_use_fast] [--use_flash_attention_2] [--use_eager_attention] [--load-in-4bit] [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE]
+[--flash-attn] [--tensorcores] [--n_ctx N_CTX] [--threads THREADS] [--threads-batch THREADS_BATCH] [--no_mul_mat_q] [--n_batch N_BATCH] [--no-mmap] [--mlock]
+[--n-gpu-layers N_GPU_LAYERS] [--tensor_split TENSOR_SPLIT] [--numa] [--logits_all] [--no_offload_kqv] [--cache-capacity CACHE_CAPACITY] [--row_split] [--streaming-llm]
+[--attention-sink-size ATTENTION_SINK_SIZE] [--gpu-split GPU_SPLIT] [--autosplit] [--max_seq_len MAX_SEQ_LEN] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa]
+[--cache_8bit] [--cache_4bit] [--num_experts_per_token NUM_EXPERTS_PER_TOKEN] [--triton] [--no_inject_fused_mlp] [--no_use_cuda_fp16] [--desc_act] [--disable_exllama]
+[--disable_exllamav2] [--wbits WBITS] [--groupsize GROUPSIZE] [--no_inject_fused_attention] [--hqq-backend HQQ_BACKEND] [--cpp-runner] [--deepspeed]
+[--nvme-offload-dir NVME_OFFLOAD_DIR] [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE] [--compress_pos_emb COMPRESS_POS_EMB] [--listen]
+[--listen-port LISTEN_PORT] [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH] [--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE]
+[--ssl-certfile SSL_CERTFILE] [--subpath SUBPATH] [--api] [--public-api] [--public-api-id PUBLIC_API_ID] [--api-port API_PORT] [--api-key API_KEY] [--admin-key ADMIN_KEY] [--nowebui]
+[--multimodal-pipeline MULTIMODAL_PIPELINE] [--model_type MODEL_TYPE] [--pre_layer PRE_LAYER [PRE_LAYER ...]] [--checkpoint CHECKPOINT] [--monkey-patch]
 
 Text generation web UI
 
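For orientation only (editor's example, not part of the commit): a minimal launch command combining a few of the flags from the usage line above; the model name is a placeholder.

```sh
# Hypothetical example: serve a placeholder model on the local network with the API enabled
python server.py --model MyModel-7B --listen --api
```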
@@ -254,6 +254,7 @@ Transformers/Accelerate:
   --force-safetensors       Set use_safetensors=True while loading the model. This prevents arbitrary code execution.
   --no_use_fast             Set use_fast=False while loading the tokenizer (it's True by default). Use this if you have any problems related to use_fast.
   --use_flash_attention_2   Set use_flash_attention_2=True while loading the model.
+  --use_eager_attention     Set attn_implementation= eager while loading the model.
 
 bitsandbytes 4-bit:
   --load-in-4bit            Load the model with 4-bit precision (using bitsandbytes).
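As an illustration only (editor's addition; the model name is a placeholder): the new --use_eager_attention switch is passed at launch like the other Transformers loader options.

```sh
# Hypothetical example: load a Transformers model with the eager attention implementation
python server.py --model MyModel-7B --use_eager_attention
```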
@@ -263,7 +264,7 @@ bitsandbytes 4-bit:
 
 llama.cpp:
   --flash-attn                     Use flash-attention.
-  --tensorcores                    Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.
+  --tensorcores                    NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.
   --n_ctx N_CTX                    Size of the prompt context.
   --threads THREADS                Number of threads to use.
   --threads-batch THREADS_BATCH    Number of threads to use for batches/prompt processing.
@@ -272,7 +273,7 @@ llama.cpp:
   --no-mmap                        Prevent mmap from being used.
   --mlock                          Force the system to keep the model in RAM.
   --n-gpu-layers N_GPU_LAYERS      Number of layers to offload to the GPU.
-  --tensor_split TENSOR_SPLIT      Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17.
+  --tensor_split TENSOR_SPLIT      Split the model across multiple GPUs. Comma-separated list of proportions. Example: 60,40.
   --numa                           Activate NUMA task allocation for llama.cpp.
   --logits_all                     Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.
   --no_offload_kqv                 Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.
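To make the updated --tensor_split example concrete (editor's sketch; the model name and layer count are placeholders): the comma-separated values are per-GPU proportions, so 60,40 places roughly 60% of the offloaded layers on the first GPU and 40% on the second.

```sh
# Hypothetical two-GPU llama.cpp launch using the proportions from the example above
python server.py --model MyModel-GGUF --n-gpu-layers 35 --tensor_split 60,40
```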
@@ -287,6 +288,8 @@ ExLlamaV2:
   --max_seq_len MAX_SEQ_LEN                       Maximum sequence length.
   --cfg-cache                                     ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.
   --no_flash_attn                                 Force flash-attention to not be used.
+  --no_xformers                                   Force xformers to not be used.
+  --no_sdpa                                       Force Torch SDPA to not be used.
   --cache_8bit                                    Use 8-bit cache to save VRAM.
   --cache_4bit                                    Use Q4 cache to save VRAM.
   --num_experts_per_token NUM_EXPERTS_PER_TOKEN   Number of experts to use for generation. Applies to MoE models like Mixtral.
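A hedged illustration of the two new ExLlamaV2 switches (editor's addition; the model name is a placeholder and the loader name is taken from the --cfg-cache description above, not from this hunk):

```sh
# Hypothetical example: ExLlamav2_HF load that falls back to the default attention path
python server.py --model MyModel-exl2 --loader ExLlamav2_HF --no_xformers --no_sdpa
```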
@@ -307,6 +310,9 @@ AutoAWQ:
 HQQ:
   --hqq-backend HQQ_BACKEND   Backend for the HQQ loader. Valid options: PYTORCH, PYTORCH_COMPILE, ATEN.
 
+TensorRT-LLM:
+  --cpp-runner                Use the ModelRunnerCpp runner, which is faster than the default ModelRunner but doesn't support streaming yet.
+
 DeepSpeed:
   --deepspeed                           Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration.
   --nvme-offload-dir NVME_OFFLOAD_DIR   DeepSpeed: Directory to use for ZeRO-3 NVME offloading.
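A hedged sketch of the new TensorRT-LLM option (editor's addition; the model name is a placeholder and the --loader value is assumed from the section heading, not stated in the commit):

```sh
# Hypothetical example: TensorRT-LLM backend with the faster, non-streaming C++ runner
python server.py --model MyModel-trt --loader TensorRT-LLM --cpp-runner
```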
@@ -327,6 +333,7 @@ Gradio:
   --gradio-auth-path GRADIO_AUTH_PATH   Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above.
   --ssl-keyfile SSL_KEYFILE             The path to the SSL certificate key file.
   --ssl-certfile SSL_CERTFILE           The path to the SSL certificate cert file.
+  --subpath SUBPATH                     Customize the subpath for gradio, use with reverse proxy
 
 API:
   --api                                 Enable the API extension.
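The new --subpath flag targets deployments behind a reverse proxy that serves the UI under a path prefix. A hedged example (the prefix is a placeholder and the matching proxy rule is assumed, not taken from the commit):

```sh
# Hypothetical example: serve the UI under /textgen so a reverse proxy can forward requests for /textgen to it
python server.py --listen --subpath /textgen
```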
@@ -392,18 +399,11 @@ Run `python download-model.py --help` to see all the options.
 
 https://colab.research.google.com/github/oobabooga/text-generation-webui/blob/main/Colab-TextGen-GPU.ipynb
 
-## Acknowledgment
-
-In August 2023, [Andreessen Horowitz](https://a16z.com/) (a16z) provided a generous grant to encourage and support my independent work on this project. I am **extremely** grateful for their trust and recognition.
-
-## Links
-
-#### Community
+## Community
 
 * Subreddit: https://www.reddit.com/r/oobabooga/
 * Discord: https://discord.gg/jwZCF2dPQN
 
-#### Support
+## Acknowledgment
 
-* ko-fi: https://ko-fi.com/oobabooga
-* GitHub Sponsors: https://github.com/sponsors/oobabooga
+In August 2023, [Andreessen Horowitz](https://a16z.com/) (a16z) provided a generous grant to encourage and support my independent work on this project. I am **extremely** grateful for their trust and recognition.
@@ -90,6 +90,7 @@
   line-height: 1.428571429 !important;
   color: rgb(243 244 246) !important;
   text-shadow: 2px 2px 2px rgb(0 0 0);
+  font-weight: 500;
 }
 
 .message-body p em {

@@ -46,6 +46,7 @@
 .message-body p {
   font-size: 15px !important;
   line-height: 22.5px !important;
+  font-weight: 500;
 }
 
 .message-body p, .chat .message-body ul, .chat .message-body ol {

@@ -88,6 +88,7 @@
   margin-bottom: 0 !important;
   font-size: 15px !important;
   line-height: 1.428571429 !important;
+  font-weight: 500;
 }
 
 .dark .message-body p em {

@@ -44,6 +44,7 @@
   margin-bottom: 0 !important;
   font-size: 15px !important;
   line-height: 1.428571429 !important;
+  font-weight: 500;
 }
 
 .dark .message-body p em {
css/highlightjs/github.min.css (new file, vendored, 10 lines)

@@ -0,0 +1,10 @@
+pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}/*!
+  Theme: GitHub
+  Description: Light theme as seen on github.com
+  Author: github.com
+  Maintainer: @Hirse
+  Updated: 2021-05-15
+
+  Outdated base version: https://github.com/primer/github-syntax-light
+  Current colors taken from GitHub's CSS
+*/.hljs{color:#24292e;background:#fff}.hljs-doctag,.hljs-keyword,.hljs-meta .hljs-keyword,.hljs-template-tag,.hljs-template-variable,.hljs-type,.hljs-variable.language_{color:#d73a49}.hljs-title,.hljs-title.class_,.hljs-title.class_.inherited__,.hljs-title.function_{color:#6f42c1}.hljs-attr,.hljs-attribute,.hljs-literal,.hljs-meta,.hljs-number,.hljs-operator,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-id,.hljs-variable{color:#005cc5}.hljs-meta .hljs-string,.hljs-regexp,.hljs-string{color:#032f62}.hljs-built_in,.hljs-symbol{color:#e36209}.hljs-code,.hljs-comment,.hljs-formula{color:#6a737d}.hljs-name,.hljs-quote,.hljs-selector-pseudo,.hljs-selector-tag{color:#22863a}.hljs-subst{color:#24292e}.hljs-section{color:#005cc5;font-weight:700}.hljs-bullet{color:#735c0f}.hljs-emphasis{color:#24292e;font-style:italic}.hljs-strong{color:#24292e;font-weight:700}.hljs-addition{color:#22863a;background-color:#f0fff4}.hljs-deletion{color:#b31d28;background-color:#ffeef0}
css/main.css (21 lines changed)

@@ -62,10 +62,6 @@ ol li p, ul li p {
   border: 0;
 }
 
-.gradio-container-3-18-0 .prose * h1, h2, h3, h4 {
-  color: white;
-}
-
 .gradio-container {
   max-width: 100% !important;
   padding-top: 0 !important;
@@ -378,6 +374,10 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
 }
 }
 
+.chat-parent .prose {
+  visibility: visible;
+}
+
 .old-ui .chat-parent {
   height: calc(100dvh - 192px - var(--header-height) - var(--input-delta));
   margin-bottom: var(--input-delta) !important;
@@ -399,6 +399,13 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
   padding-bottom: 15px !important;
 }
 
+.message-body h1,
+.message-body h2,
+.message-body h3,
+.message-body h4 {
+  color: var(--body-text-color);
+}
+
 .message-body li {
   list-style-position: outside;
 }
@@ -447,6 +454,11 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
   border-radius: 5px;
   font-size: 82%;
   padding: 1px 3px;
+  background: white !important;
+  color: #1f2328;
+}
+
+.dark .message-body code {
   background: #0d1117 !important;
   color: rgb(201 209 217);
 }
@@ -796,4 +808,3 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
   max-height: 300px;
 }
 }
-
js/dark_theme.js (new file, 9 lines)

@@ -0,0 +1,9 @@
+function toggleDarkMode() {
+  document.body.classList.toggle("dark");
+  var currentCSS = document.getElementById("highlight-css");
+  if (currentCSS.getAttribute("href") === "file/css/highlightjs/github-dark.min.css") {
+    currentCSS.setAttribute("href", "file/css/highlightjs/github.min.css");
+  } else {
+    currentCSS.setAttribute("href", "file/css/highlightjs/github-dark.min.css");
+  }
+}
@@ -445,14 +445,12 @@ function updateCssProperties() {
 
   // Check if the chat container is visible
   if (chatContainer.clientHeight > 0) {
-
-    // Calculate new chat height and adjust CSS properties
     var numericHeight = chatContainer.parentNode.clientHeight - chatInputHeight + 40 - 100;
     if (document.getElementById("chat-tab").style.paddingBottom != "") {
       numericHeight += 20;
     }
-    const newChatHeight = `${numericHeight}px`;
 
+    const newChatHeight = `${numericHeight}px`;
     document.documentElement.style.setProperty("--chat-height", newChatHeight);
     document.documentElement.style.setProperty("--input-delta", `${chatInputHeight - 40}px`);
 
@@ -463,15 +461,14 @@ function updateCssProperties() {
 
     // Adjust scrollTop based on input height change
     if (chatInputHeight !== currentChatInputHeight) {
-      chatContainer.scrollTop += chatInputHeight > currentChatInputHeight ? chatInputHeight : -chatInputHeight + 40;
+      chatContainer.scrollTop += chatInputHeight - currentChatInputHeight;
       currentChatInputHeight = chatInputHeight;
     }
   }
 }
 
 // Observe textarea size changes and call update function
-new ResizeObserver(updateCssProperties)
-  .observe(document.querySelector("#chat-input textarea"));
+new ResizeObserver(updateCssProperties).observe(document.querySelector("#chat-input textarea"));
 
 // Handle changes in window size
 window.addEventListener("resize", updateCssProperties);
@@ -3,6 +3,7 @@ import io
 
 import requests
 
+from modules import shared
 from modules.logging_colors import logger
 
 original_open = open
@@ -54,6 +55,7 @@ def my_open(*args, **kwargs):
             '\n <script src="file/js/katex/auto-render.min.js"></script>'
             '\n <script src="file/js/highlightjs/highlight.min.js"></script>'
             '\n <script src="file/js/highlightjs/highlightjs-copy.min.js"></script>'
+            f'\n <link id="highlight-css" rel="stylesheet" href="file/css/highlightjs/{"github-dark" if shared.settings["dark_theme"] else "github"}.min.css">'
             '\n <script>hljs.addPlugin(new CopyButtonPlugin());</script>'
             '\n </head>'
         )
modules/chat.py (205 lines changed)

@@ -22,7 +22,8 @@ from modules.logging_colors import logger
 from modules.text_generation import (
     generate_reply,
     get_encoded_length,
-    get_max_prompt_length
+    get_max_prompt_length,
+    stop_everything_event
 )
 from modules.utils import delete_file, get_available_characters, save_file
 
@@ -421,9 +422,12 @@ def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
         send_dummy_message(text, state)
         send_dummy_reply(state['start_with'], state)
 
+    history = state['history']
     for i, history in enumerate(generate_chat_reply(text, state, regenerate, _continue, loading_message=True, for_ui=True)):
         yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu']), history
 
+    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
+
 
 def remove_last_message(history):
     if len(history['visible']) > 0 and history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
@@ -995,3 +999,202 @@ def my_yaml_output(data):
             result += " " + line.rstrip(' ') + "\n"
 
     return result
+
+
+def handle_replace_last_reply_click(text, state):
+    history = replace_last_reply(text, state)
+    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [history, html, ""]
+
+
+def handle_send_dummy_message_click(text, state):
+    history = send_dummy_message(text, state)
+    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [history, html, ""]
+
+
+def handle_send_dummy_reply_click(text, state):
+    history = send_dummy_reply(text, state)
+    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [history, html, ""]
+
+
+def handle_remove_last_click(state):
+    last_input, history = remove_last_message(state['history'])
+    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [history, html, last_input]
+
+
+def handle_stop_click(state):
+    stop_everything_event()
+    html = redraw_html(state['history'], state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return html
+
+
+def handle_unique_id_select(state):
+    history = load_history(state['unique_id'], state['character_menu'], state['mode'])
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [history, html]
+
+
+def handle_start_new_chat_click(state):
+    history = start_new_chat(state)
+    histories = find_all_histories_with_first_prompts(state)
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [history, html, gr.update(choices=histories, value=histories[0][1])]
+
+
+def handle_delete_chat_confirm_click(state):
+    index = str(find_all_histories(state).index(state['unique_id']))
+    delete_history(state['unique_id'], state['character_menu'], state['mode'])
+    history, unique_id = load_history_after_deletion(state, index)
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [
+        history,
+        html,
+        unique_id,
+        gr.update(visible=False),
+        gr.update(visible=True),
+        gr.update(visible=False)
+    ]
+
+
+def handle_rename_chat_click():
+    return [
+        gr.update(visible=True, value="My New Chat"),
+        gr.update(visible=True),
+        gr.update(visible=True)
+    ]
+
+
+def handle_rename_chat_confirm(rename_to, state):
+    rename_history(state['unique_id'], rename_to, state['character_menu'], state['mode'])
+    histories = find_all_histories_with_first_prompts(state)
+
+    return [
+        gr.update(choices=histories, value=rename_to),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False)
+    ]
+
+
+def handle_upload_chat_history(load_chat_history, state):
+    history = start_new_chat(state)
+    history = load_history_json(load_chat_history, history)
+    histories = find_all_histories_with_first_prompts(state)
+    save_history(history, state['unique_id'], state['character_menu'], state['mode'])
+
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [
+        history,
+        html,
+        gr.update(choices=histories, value=histories[0][1])
+    ]
+
+
+def handle_character_menu_change(state):
+    name1, name2, picture, greeting, context = load_character(state['character_menu'], state['name1'], state['name2'])
+
+    state['name1'] = name1
+    state['name2'] = name2
+    state['character_picture'] = picture
+    state['greeting'] = greeting
+    state['context'] = context
+
+    history = load_latest_history(state)
+    histories = find_all_histories_with_first_prompts(state)
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [
+        history,
+        html,
+        name1,
+        name2,
+        picture,
+        greeting,
+        context,
+        gr.update(choices=histories, value=histories[0][1]),
+    ]
+
+
+def handle_mode_change(state):
+    history = load_latest_history(state)
+    histories = find_all_histories_with_first_prompts(state)
+    html = redraw_html(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'])
+
+    return [
+        history,
+        html,
+        gr.update(visible=state['mode'] != 'instruct'),
+        gr.update(visible=state['mode'] == 'chat-instruct'),
+        gr.update(choices=histories, value=histories[0][1])
+    ]
+
+
+def handle_save_character_click(name2):
+    return [
+        name2,
+        gr.update(visible=True)
+    ]
+
+
+def handle_load_template_click(instruction_template):
+    output = load_instruction_template(instruction_template)
+    return [
+        output,
+        "Select template to load..."
+    ]
+
+
+def handle_save_template_click(instruction_template_str):
+    contents = generate_instruction_template_yaml(instruction_template_str)
+    return [
+        "My Template.yaml",
+        "instruction-templates/",
+        contents,
+        gr.update(visible=True)
+    ]
+
+
+def handle_delete_template_click(template):
+    return [
+        f"{template}.yaml",
+        "instruction-templates/",
+        gr.update(visible=True)
+    ]
+
+
+def handle_your_picture_change(picture, state):
+    upload_your_profile_picture(picture)
+    html = redraw_html(state['history'], state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu'], reset_cache=True)
+
+    return html
+
+
+def handle_send_instruction_click(state):
+    state['mode'] = 'instruct'
+    state['history'] = {'internal': [], 'visible': []}
+
+    output = generate_chat_prompt("Input", state)
+
+    return output
+
+
+def handle_send_chat_click(state):
+    output = generate_chat_prompt("", state, _continue=True)
+
+    return output
@@ -33,7 +33,7 @@ settings = {
     'dark_theme': True,
     'show_controls': True,
     'start_with': '',
-    'mode': 'chat',
+    'mode': 'chat-instruct',
     'chat_style': 'cai-chat',
     'prompt-default': 'QA',
     'prompt-notebook': 'QA',
@@ -118,7 +118,7 @@ group.add_argument('--quant_type', type=str, default='nf4', help='quant_type for
 # llama.cpp
 group = parser.add_argument_group('llama.cpp')
 group.add_argument('--flash-attn', action='store_true', help='Use flash-attention.')
-group.add_argument('--tensorcores', action='store_true', help='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.')
+group.add_argument('--tensorcores', action='store_true', help='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.')
 group.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 group.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
 group.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
@@ -127,7 +127,7 @@ group.add_argument('--n_batch', type=int, default=512, help='Maximum number of p
 group.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 group.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 group.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
-group.add_argument('--tensor_split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17.')
+group.add_argument('--tensor_split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 60,40.')
 group.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
 group.add_argument('--logits_all', action='store_true', help='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.')
 group.add_argument('--no_offload_kqv', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
@@ -195,6 +195,7 @@ group.add_argument('--gradio-auth', type=str, help='Set Gradio authentication pa
 group.add_argument('--gradio-auth-path', type=str, help='Set the Gradio authentication file path. The file should contain one or more user:password pairs in the same format as above.', default=None)
 group.add_argument('--ssl-keyfile', type=str, help='The path to the SSL certificate key file.', default=None)
 group.add_argument('--ssl-certfile', type=str, help='The path to the SSL certificate cert file.', default=None)
+group.add_argument('--subpath', type=str, help='Customize the subpath for gradio, use with reverse proxy')
 
 # API
 group = parser.add_argument_group('API')
@@ -15,8 +15,6 @@ with open(Path(__file__).resolve().parent / '../css/main.css', 'r') as f:
     css += f.read()
 with open(Path(__file__).resolve().parent / '../css/katex/katex.min.css', 'r') as f:
     css += f.read()
-with open(Path(__file__).resolve().parent / '../css/highlightjs/github-dark.min.css', 'r') as f:
-    css += f.read()
 with open(Path(__file__).resolve().parent / '../css/highlightjs/highlightjs-copy.min.css', 'r') as f:
     css += f.read()
 with open(Path(__file__).resolve().parent / '../js/main.js', 'r') as f:

@@ -29,6 +27,8 @@ with open(Path(__file__).resolve().parent / '../js/show_controls.js', 'r') as f:
     show_controls_js = f.read()
 with open(Path(__file__).resolve().parent / '../js/update_big_picture.js', 'r') as f:
     update_big_picture_js = f.read()
+with open(Path(__file__).resolve().parent / '../js/dark_theme.js', 'r') as f:
+    dark_theme_js = f.read()
 
 refresh_symbol = '🔄'
 delete_symbol = '🗑️'
@@ -116,6 +116,7 @@ def list_model_elements():
         'hqq_backend',
         'cpp_runner',
     ]
 
     if is_torch_xpu_available():
         for i in range(torch.xpu.device_count()):
             elements.append(f'gpu_memory_{i}')
@@ -184,6 +185,7 @@ def list_interface_input_elements():
         'start_with',
         'character_menu',
         'history',
+        'unique_id',
         'name1',
         'user_bio',
         'name2',
@@ -213,9 +215,11 @@
 
 
 def gather_interface_values(*args):
+    interface_elements = list_interface_input_elements()
+
     output = {}
-    for i, element in enumerate(list_interface_input_elements()):
-        output[element] = args[i]
+    for element, value in zip(interface_elements, args):
+        output[element] = value
 
     if not shared.args.multi_user:
         shared.persistent_interface_state = output
@@ -226,8 +230,14 @@ def gather_interface_values(*args):
 def apply_interface_values(state, use_persistent=False):
     if use_persistent:
         state = shared.persistent_interface_state
 
+    if 'textbox-default' in state:
+        state.pop('prompt_menu-default')
+
+    if 'textbox-notebook' in state:
+        state.pop('prompt_menu-notebook')
+
     elements = list_interface_input_elements()
     if len(state) == 0:
         return [gr.update() for k in elements]  # Dummy, do nothing
     else:
@@ -236,7 +246,7 @@ def apply_interface_values(state, use_persistent=False):
 
 def save_settings(state, preset, extensions_list, show_controls, theme_state):
     output = copy.deepcopy(shared.settings)
-    exclude = ['name2', 'greeting', 'context', 'turn_template', 'truncation_length']
+    exclude = ['name2', 'greeting', 'context', 'truncation_length', 'instruction_template_str']
     for k in state:
         if k in shared.settings and k not in exclude:
             output[k] = state[k]
@@ -268,7 +278,7 @@ def save_settings(state, preset, extensions_list, show_controls, theme_state):
         if key in shared.default_settings and output[key] == shared.default_settings[key]:
             output.pop(key)
 
-    return yaml.dump(output, sort_keys=False, width=float("inf"))
+    return yaml.dump(output, sort_keys=False, width=float("inf"), allow_unicode=True)
 
 
 def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):
@@ -7,7 +7,6 @@ from PIL import Image
 
 from modules import chat, shared, ui, utils
 from modules.html_generator import chat_html_wrapper
-from modules.text_generation import stop_everything_event
 from modules.utils import gradio
 
 inputs = ('Chat input', 'interface_state')
@@ -137,7 +136,7 @@ def create_chat_settings_ui():
         shared.gradio['tavern_json'] = gr.State()
         with gr.Column():
             shared.gradio['tavern_name'] = gr.Textbox(value='', lines=1, label='Name', interactive=False)
-            shared.gradio['tavern_desc'] = gr.Textbox(value='', lines=4, max_lines=4, label='Description', interactive=False)
+            shared.gradio['tavern_desc'] = gr.Textbox(value='', lines=10, label='Description', interactive=False, elem_classes=['add_scrollbar'])
 
             shared.gradio['Submit tavern character'] = gr.Button(value='Submit', interactive=False)
 
@@ -181,169 +180,112 @@ def create_event_handlers():
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
         lambda x: (x, ''), gradio('textbox'), gradio('Chat input', 'textbox'), show_progress=False).then(
         chat.generate_chat_reply_wrapper, gradio(inputs), gradio('display', 'history'), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
         None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
 
     shared.gradio['textbox'].submit(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
         lambda x: (x, ''), gradio('textbox'), gradio('Chat input', 'textbox'), show_progress=False).then(
         chat.generate_chat_reply_wrapper, gradio(inputs), gradio('display', 'history'), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
         None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
 
     shared.gradio['Regenerate'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
         partial(chat.generate_chat_reply_wrapper, regenerate=True), gradio(inputs), gradio('display', 'history'), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
         None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
 
     shared.gradio['Continue'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
         partial(chat.generate_chat_reply_wrapper, _continue=True), gradio(inputs), gradio('display', 'history'), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
         None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
 
     shared.gradio['Impersonate'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
         lambda x: x, gradio('textbox'), gradio('Chat input'), show_progress=False).then(
         chat.impersonate_wrapper, gradio(inputs), gradio('textbox', 'display'), show_progress=False).then(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
         None, None, None, js=f'() => {{{ui.audio_notification_js}}}')
 
     shared.gradio['Replace last reply'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.replace_last_reply, gradio('textbox', 'interface_state'), gradio('history')).then(
-        lambda: '', None, gradio('textbox'), show_progress=False).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
+        chat.handle_replace_last_reply_click, gradio('textbox', 'interface_state'), gradio('history', 'display', 'textbox'), show_progress=False)
 
     shared.gradio['Send dummy message'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.send_dummy_message, gradio('textbox', 'interface_state'), gradio('history')).then(
-        lambda: '', None, gradio('textbox'), show_progress=False).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
+        chat.handle_send_dummy_message_click, gradio('textbox', 'interface_state'), gradio('history', 'display', 'textbox'), show_progress=False)
 
     shared.gradio['Send dummy reply'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.send_dummy_reply, gradio('textbox', 'interface_state'), gradio('history')).then(
-        lambda: '', None, gradio('textbox'), show_progress=False).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
+        chat.handle_send_dummy_reply_click, gradio('textbox', 'interface_state'), gradio('history', 'display', 'textbox'), show_progress=False)
 
     shared.gradio['Remove last'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.remove_last_message, gradio('history'), gradio('textbox', 'history'), show_progress=False).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
+        chat.handle_remove_last_click, gradio('interface_state'), gradio('history', 'display', 'textbox'), show_progress=False)
 
     shared.gradio['Stop'].click(
-        stop_everything_event, None, None, queue=False).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display'))
+        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+        chat.handle_stop_click, gradio('interface_state'), gradio('display'), show_progress=False)
 
     if not shared.args.multi_user:
         shared.gradio['unique_id'].select(
-            chat.load_history, gradio('unique_id', 'character_menu', 'mode'), gradio('history')).then(
-            chat.redraw_html, gradio(reload_arr), gradio('display'))
+            ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+            chat.handle_unique_id_select, gradio('interface_state'), gradio('history', 'display'), show_progress=False)
 
     shared.gradio['Start new chat'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.start_new_chat, gradio('interface_state'), gradio('history')).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        lambda x: gr.update(choices=(histories := chat.find_all_histories_with_first_prompts(x)), value=histories[0][1]), gradio('interface_state'), gradio('unique_id'), show_progress=False)
+        chat.handle_start_new_chat_click, gradio('interface_state'), gradio('history', 'display', 'unique_id'), show_progress=False)
 
     shared.gradio['delete_chat'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, gradio(clear_arr))
     shared.gradio['delete_chat-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))
     shared.gradio['delete_chat-confirm'].click(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        lambda x, y: str(chat.find_all_histories(x).index(y)), gradio('interface_state', 'unique_id'), gradio('temporary_text')).then(
-        chat.delete_history, gradio('unique_id', 'character_menu', 'mode'), None).then(
-        chat.load_history_after_deletion, gradio('interface_state', 'temporary_text'), gradio('history', 'unique_id'), show_progress=False).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))
+        chat.handle_delete_chat_confirm_click, gradio('interface_state'), gradio('history', 'display', 'unique_id') + gradio(clear_arr), show_progress=False)
 
-    shared.gradio['rename_chat'].click(
-        lambda: "My New Chat", None, gradio('rename_to')).then(
-        lambda: [gr.update(visible=True)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
-
-    shared.gradio['rename_to-cancel'].click(
-        lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
-
+    shared.gradio['rename_chat'].click(chat.handle_rename_chat_click, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
+    shared.gradio['rename_to-cancel'].click(lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
     shared.gradio['rename_to-confirm'].click(
-        chat.rename_history, gradio('unique_id', 'rename_to', 'character_menu', 'mode'), None).then(
-        lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False).then(
-        lambda x, y: gr.update(choices=chat.find_all_histories_with_first_prompts(x), value=y), gradio('interface_state', 'rename_to'), gradio('unique_id'))
+        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+        chat.handle_rename_chat_confirm, gradio('rename_to', 'interface_state'), gradio('unique_id', 'rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
 
     shared.gradio['rename_to'].submit(
-        chat.rename_history, gradio('unique_id', 'rename_to', 'character_menu', 'mode'), None).then(
-        lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False).then(
-        lambda x, y: gr.update(choices=chat.find_all_histories_with_first_prompts(x), value=y), gradio('interface_state', 'rename_to'), gradio('unique_id'))
+        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+        chat.handle_rename_chat_confirm, gradio('rename_to', 'interface_state'), gradio('unique_id', 'rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
 
     shared.gradio['load_chat_history'].upload(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.start_new_chat, gradio('interface_state'), gradio('history')).then(
-        chat.load_history_json, gradio('load_chat_history', 'history'), gradio('history')).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        lambda x: gr.update(choices=(histories := chat.find_all_histories_with_first_prompts(x)), value=histories[0][1]), gradio('interface_state'), gradio('unique_id'), show_progress=False).then(
-        chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
+        chat.handle_upload_chat_history, gradio('load_chat_history', 'interface_state'), gradio('history', 'display', 'unique_id'), show_progress=False).then(
         None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_chat()}}')
 
     shared.gradio['character_menu'].change(
-        chat.load_character, gradio('character_menu', 'name1', 'name2'), gradio('name1', 'name2', 'character_picture', 'greeting', 'context')).success(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        lambda x: gr.update(choices=(histories := chat.find_all_histories_with_first_prompts(x)), value=histories[0][1]), gradio('interface_state'), gradio('unique_id'), show_progress=False).then(
+        chat.handle_character_menu_change, gradio('interface_state'), gradio('history', 'display', 'name1', 'name2', 'character_picture', 'greeting', 'context', 'unique_id'), show_progress=False).then(
         None, None, None, js=f'() => {{{ui.update_big_picture_js}; updateBigPicture()}}')
 
-    shared.gradio['mode'].change(None, gradio('mode'), None, js="(mode) => {mode === 'instruct' ? document.getElementById('character-menu').parentNode.parentNode.style.display = 'none' : document.getElementById('character-menu').parentNode.parentNode.style.display = ''}")
-
     shared.gradio['mode'].change(
-        lambda x: [gr.update(visible=x != 'instruct'), gr.update(visible=x == 'chat-instruct')], gradio('mode'), gradio('chat_style', 'chat-instruct_command'), show_progress=False).then(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
-        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        lambda x: gr.update(choices=(histories := chat.find_all_histories_with_first_prompts(x)), value=histories[0][1]), gradio('interface_state'), gradio('unique_id'), show_progress=False)
+        chat.handle_mode_change, gradio('interface_state'), gradio('history', 'display', 'chat_style', 'chat-instruct_command', 'unique_id'), show_progress=False).then(
+        None, gradio('mode'), None, js="(mode) => {mode === 'instruct' ? document.getElementById('character-menu').parentNode.parentNode.style.display = 'none' : document.getElementById('character-menu').parentNode.parentNode.style.display = ''}")
 
-    shared.gradio['chat_style'].change(chat.redraw_html, gradio(reload_arr), gradio('display'))
+    shared.gradio['chat_style'].change(chat.redraw_html, gradio(reload_arr), gradio('display'), show_progress=False)
|
||||||
shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, gradio('history'), gradio('textbox'), show_progress=False)
|
shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, gradio('history'), gradio('textbox'), show_progress=False)
|
||||||
|
|
||||||
# Save/delete a character
|
# Save/delete a character
|
||||||
shared.gradio['save_character'].click(
|
shared.gradio['save_character'].click(chat.handle_save_character_click, gradio('name2'), gradio('save_character_filename', 'character_saver'), show_progress=False)
|
||||||
lambda x: x, gradio('name2'), gradio('save_character_filename')).then(
|
shared.gradio['delete_character'].click(lambda: gr.update(visible=True), None, gradio('character_deleter'), show_progress=False)
|
||||||
lambda: gr.update(visible=True), None, gradio('character_saver'))
|
shared.gradio['load_template'].click(chat.handle_load_template_click, gradio('instruction_template'), gradio('instruction_template_str', 'instruction_template'), show_progress=False)
|
||||||
|
|
||||||
shared.gradio['delete_character'].click(lambda: gr.update(visible=True), None, gradio('character_deleter'))
|
|
||||||
|
|
||||||
shared.gradio['load_template'].click(
|
|
||||||
chat.load_instruction_template, gradio('instruction_template'), gradio('instruction_template_str')).then(
|
|
||||||
lambda: "Select template to load...", None, gradio('instruction_template'))
|
|
||||||
|
|
||||||
shared.gradio['save_template'].click(
|
shared.gradio['save_template'].click(
|
||||||
lambda: 'My Template.yaml', None, gradio('save_filename')).then(
|
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
||||||
lambda: 'instruction-templates/', None, gradio('save_root')).then(
|
chat.handle_save_template_click, gradio('instruction_template_str'), gradio('save_filename', 'save_root', 'save_contents', 'file_saver'), show_progress=False)
|
||||||
chat.generate_instruction_template_yaml, gradio('instruction_template_str'), gradio('save_contents')).then(
|
|
||||||
lambda: gr.update(visible=True), None, gradio('file_saver'))
|
|
||||||
|
|
||||||
shared.gradio['delete_template'].click(
|
|
||||||
lambda x: f'{x}.yaml', gradio('instruction_template'), gradio('delete_filename')).then(
|
|
||||||
lambda: 'instruction-templates/', None, gradio('delete_root')).then(
|
|
||||||
lambda: gr.update(visible=True), None, gradio('file_deleter'))
|
|
||||||
|
|
||||||
|
shared.gradio['delete_template'].click(chat.handle_delete_template_click, gradio('instruction_template'), gradio('delete_filename', 'delete_root', 'file_deleter'), show_progress=False)
|
||||||
shared.gradio['save_chat_history'].click(
|
shared.gradio['save_chat_history'].click(
|
||||||
lambda x: json.dumps(x, indent=4), gradio('history'), gradio('temporary_text')).then(
|
lambda x: json.dumps(x, indent=4), gradio('history'), gradio('temporary_text')).then(
|
||||||
None, gradio('temporary_text', 'character_menu', 'mode'), None, js=f'(hist, char, mode) => {{{ui.save_files_js}; saveHistory(hist, char, mode)}}')
|
None, gradio('temporary_text', 'character_menu', 'mode'), None, js=f'(hist, char, mode) => {{{ui.save_files_js}; saveHistory(hist, char, mode)}}')
|
||||||
|
|
||||||
shared.gradio['Submit character'].click(
|
shared.gradio['Submit character'].click(
|
||||||
chat.upload_character, gradio('upload_json', 'upload_img_bot'), gradio('character_menu')).then(
|
chat.upload_character, gradio('upload_json', 'upload_img_bot'), gradio('character_menu'), show_progress=False).then(
|
||||||
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_character()}}')
|
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_character()}}')
|
||||||
|
|
||||||
shared.gradio['Submit tavern character'].click(
|
shared.gradio['Submit tavern character'].click(
|
||||||
chat.upload_tavern_character, gradio('upload_img_tavern', 'tavern_json'), gradio('character_menu')).then(
|
chat.upload_tavern_character, gradio('upload_img_tavern', 'tavern_json'), gradio('character_menu'), show_progress=False).then(
|
||||||
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_character()}}')
|
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_character()}}')
|
||||||
|
|
||||||
shared.gradio['upload_json'].upload(lambda: gr.update(interactive=True), None, gradio('Submit character'))
|
shared.gradio['upload_json'].upload(lambda: gr.update(interactive=True), None, gradio('Submit character'))
|
||||||
@ -351,35 +293,32 @@ def create_event_handlers():
shared.gradio['upload_img_tavern'].upload(chat.check_tavern_character, gradio('upload_img_tavern'), gradio('tavern_name', 'tavern_desc', 'tavern_json', 'Submit tavern character'), show_progress=False)
shared.gradio['upload_img_tavern'].clear(lambda: (None, None, None, gr.update(interactive=False)), None, gradio('tavern_name', 'tavern_desc', 'tavern_json', 'Submit tavern character'), show_progress=False)
shared.gradio['your_picture'].change(
-chat.upload_your_profile_picture, gradio('your_picture'), None).then(
-partial(chat.redraw_html, reset_cache=True), gradio(reload_arr), gradio('display'))
+ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+chat.handle_your_picture_change, gradio('your_picture', 'interface_state'), gradio('display'), show_progress=False)

shared.gradio['send_instruction_to_default'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-lambda x: x.update({'mode': 'instruct', 'history': {'internal': [], 'visible': []}}), gradio('interface_state'), None).then(
-partial(chat.generate_chat_prompt, 'Input'), gradio('interface_state'), gradio('textbox-default')).then(
+chat.handle_send_instruction_click, gradio('interface_state'), gradio('textbox-default'), show_progress=False).then(
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_default()}}')

shared.gradio['send_instruction_to_notebook'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-lambda x: x.update({'mode': 'instruct', 'history': {'internal': [], 'visible': []}}), gradio('interface_state'), None).then(
-partial(chat.generate_chat_prompt, 'Input'), gradio('interface_state'), gradio('textbox-notebook')).then(
+chat.handle_send_instruction_click, gradio('interface_state'), gradio('textbox-notebook'), show_progress=False).then(
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_notebook()}}')

shared.gradio['send_instruction_to_negative_prompt'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-lambda x: x.update({'mode': 'instruct', 'history': {'internal': [], 'visible': []}}), gradio('interface_state'), None).then(
-partial(chat.generate_chat_prompt, 'Input'), gradio('interface_state'), gradio('negative_prompt')).then(
+chat.handle_send_instruction_click, gradio('interface_state'), gradio('negative_prompt'), show_progress=False).then(
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_generation_parameters()}}')

shared.gradio['send-chat-to-default'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-partial(chat.generate_chat_prompt, '', _continue=True), gradio('interface_state'), gradio('textbox-default')).then(
+chat.handle_send_chat_click, gradio('interface_state'), gradio('textbox-default'), show_progress=False).then(
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_default()}}')

shared.gradio['send-chat-to-notebook'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-partial(chat.generate_chat_prompt, '', _continue=True), gradio('interface_state'), gradio('textbox-notebook')).then(
+chat.handle_send_chat_click, gradio('interface_state'), gradio('textbox-notebook'), show_progress=False).then(
None, None, None, js=f'() => {{{ui.switch_tabs_js}; switch_to_notebook()}}')

shared.gradio['show_controls'].change(None, gradio('show_controls'), None, js=f'(x) => {{{ui.show_controls_js}; toggle_controls(x)}}')
@ -64,38 +64,46 @@ def create_event_handlers():
shared.gradio['Generate-default'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+lambda state, left, right: state.update({'textbox-default': left, 'output_textbox': right}), gradio('interface_state', 'textbox-default', 'output_textbox'), None).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')

shared.gradio['textbox-default'].submit(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+lambda state, left, right: state.update({'textbox-default': left, 'output_textbox': right}), gradio('interface_state', 'textbox-default', 'output_textbox'), None).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')

-shared.gradio['markdown_render-default'].click(lambda x: x, gradio('output_textbox'), gradio('markdown-default'), queue=False)
shared.gradio['Continue-default'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
generate_reply_wrapper, [shared.gradio['output_textbox']] + gradio(inputs)[1:], gradio(outputs), show_progress=False).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+lambda state, left, right: state.update({'textbox-default': left, 'output_textbox': right}), gradio('interface_state', 'textbox-default', 'output_textbox'), None).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')

shared.gradio['Stop-default'].click(stop_everything_event, None, None, queue=False)
+shared.gradio['markdown_render-default'].click(lambda x: x, gradio('output_textbox'), gradio('markdown-default'), queue=False)
shared.gradio['prompt_menu-default'].change(load_prompt, gradio('prompt_menu-default'), gradio('textbox-default'), show_progress=False)
-shared.gradio['save_prompt-default'].click(
-lambda x: x, gradio('textbox-default'), gradio('save_contents')).then(
-lambda: 'prompts/', None, gradio('save_root')).then(
-lambda: utils.current_time() + '.txt', None, gradio('save_filename')).then(
-lambda: gr.update(visible=True), None, gradio('file_saver'))
+shared.gradio['save_prompt-default'].click(handle_save_prompt, gradio('textbox-default'), gradio('save_contents', 'save_filename', 'save_root', 'file_saver'), show_progress=False)
+shared.gradio['delete_prompt-default'].click(handle_delete_prompt, gradio('prompt_menu-default'), gradio('delete_filename', 'delete_root', 'file_deleter'), show_progress=False)

-shared.gradio['delete_prompt-default'].click(
-lambda: 'prompts/', None, gradio('delete_root')).then(
-lambda x: x + '.txt', gradio('prompt_menu-default'), gradio('delete_filename')).then(
-lambda: gr.update(visible=True), None, gradio('file_deleter'))

shared.gradio['textbox-default'].change(lambda x: f"<span>{count_tokens(x)}</span>", gradio('textbox-default'), gradio('token-counter-default'), show_progress=False)
shared.gradio['get_logits-default'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
logits.get_next_logits, gradio('textbox-default', 'interface_state', 'use_samplers-default', 'logits-default'), gradio('logits-default', 'logits-default-previous'), show_progress=False)

shared.gradio['get_tokens-default'].click(get_token_ids, gradio('textbox-default'), gradio('tokens-default'), show_progress=False)


+def handle_save_prompt(text):
+return [
+text,
+utils.current_time() + ".txt",
+"prompts/",
+gr.update(visible=True)
+]


+def handle_delete_prompt(prompt):
+return [
+prompt + ".txt",
+"prompts/",
+gr.update(visible=True)
+]
@ -1,3 +1,5 @@
+import traceback

import gradio as gr

from modules import chat, presets, shared, ui, utils
@ -47,57 +49,119 @@ def create_ui():

def create_event_handlers():
-shared.gradio['save_confirm'].click(
-lambda x, y, z: utils.save_file(x + y, z), gradio('save_root', 'save_filename', 'save_contents'), None).then(
-lambda: gr.update(visible=False), None, gradio('file_saver'))

-shared.gradio['delete_confirm'].click(
-lambda x, y: utils.delete_file(x + y), gradio('delete_root', 'delete_filename'), None).then(
-lambda: gr.update(visible=False), None, gradio('file_deleter'))

-shared.gradio['delete_cancel'].click(lambda: gr.update(visible=False), None, gradio('file_deleter'))
-shared.gradio['save_cancel'].click(lambda: gr.update(visible=False), None, gradio('file_saver'))

-shared.gradio['save_character_confirm'].click(
-chat.save_character, gradio('name2', 'greeting', 'context', 'character_picture', 'save_character_filename'), None).then(
-lambda: gr.update(visible=False), None, gradio('character_saver')).then(
-lambda x: gr.update(choices=utils.get_available_characters(), value=x), gradio('save_character_filename'), gradio('character_menu'))

-shared.gradio['delete_character_confirm'].click(
-lambda x: str(utils.get_available_characters().index(x)), gradio('character_menu'), gradio('temporary_text')).then(
-chat.delete_character, gradio('character_menu'), None).then(
-chat.update_character_menu_after_deletion, gradio('temporary_text'), gradio('character_menu')).then(
-lambda: gr.update(visible=False), None, gradio('character_deleter'))

-shared.gradio['save_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_saver'))
-shared.gradio['delete_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_deleter'))

shared.gradio['save_preset'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-presets.generate_preset_yaml, gradio('interface_state'), gradio('save_preset_contents')).then(
-lambda: 'My Preset', None, gradio('save_preset_filename')).then(
-lambda: gr.update(visible=True), None, gradio('preset_saver'))
+handle_save_preset_click, gradio('interface_state'), gradio('save_preset_contents', 'save_preset_filename', 'preset_saver'), show_progress=False)

-shared.gradio['save_preset_confirm'].click(
-lambda x, y: utils.save_file(f'presets/{x}.yaml', y), gradio('save_preset_filename', 'save_preset_contents'), None).then(
-lambda: gr.update(visible=False), None, gradio('preset_saver')).then(
-lambda x: gr.update(choices=utils.get_available_presets(), value=x), gradio('save_preset_filename'), gradio('preset_menu'))
+shared.gradio['delete_preset'].click(handle_delete_preset_click, gradio('preset_menu'), gradio('delete_filename', 'delete_root', 'file_deleter'), show_progress=False)
+shared.gradio['save_grammar'].click(handle_save_grammar_click, gradio('grammar_string'), gradio('save_contents', 'save_filename', 'save_root', 'file_saver'), show_progress=False)
+shared.gradio['delete_grammar'].click(handle_delete_grammar_click, gradio('grammar_file'), gradio('delete_filename', 'delete_root', 'file_deleter'), show_progress=False)

-shared.gradio['save_preset_cancel'].click(lambda: gr.update(visible=False), None, gradio('preset_saver'))
+shared.gradio['save_preset_confirm'].click(handle_save_preset_confirm_click, gradio('save_preset_filename', 'save_preset_contents'), gradio('preset_menu', 'preset_saver'), show_progress=False)
+shared.gradio['save_confirm'].click(handle_save_confirm_click, gradio('save_root', 'save_filename', 'save_contents'), gradio('file_saver'), show_progress=False)
+shared.gradio['delete_confirm'].click(handle_delete_confirm_click, gradio('delete_root', 'delete_filename'), gradio('file_deleter'), show_progress=False)
+shared.gradio['save_character_confirm'].click(handle_save_character_confirm_click, gradio('name2', 'greeting', 'context', 'character_picture', 'save_character_filename'), gradio('character_menu', 'character_saver'), show_progress=False)
+shared.gradio['delete_character_confirm'].click(handle_delete_character_confirm_click, gradio('character_menu'), gradio('character_menu', 'character_deleter'), show_progress=False)

-shared.gradio['delete_preset'].click(
-lambda x: f'{x}.yaml', gradio('preset_menu'), gradio('delete_filename')).then(
-lambda: 'presets/', None, gradio('delete_root')).then(
-lambda: gr.update(visible=True), None, gradio('file_deleter'))
+shared.gradio['save_preset_cancel'].click(lambda: gr.update(visible=False), None, gradio('preset_saver'), show_progress=False)
+shared.gradio['save_cancel'].click(lambda: gr.update(visible=False), None, gradio('file_saver'))
+shared.gradio['delete_cancel'].click(lambda: gr.update(visible=False), None, gradio('file_deleter'))
+shared.gradio['save_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_saver'), show_progress=False)
+shared.gradio['delete_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_deleter'), show_progress=False)

-shared.gradio['save_grammar'].click(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-lambda x: x, gradio('grammar_string'), gradio('save_contents')).then(
-lambda: 'grammars/', None, gradio('save_root')).then(
-lambda: 'My Fancy Grammar.gbnf', None, gradio('save_filename')).then(
-lambda: gr.update(visible=True), None, gradio('file_saver'))

-shared.gradio['delete_grammar'].click(
-lambda x: x, gradio('grammar_file'), gradio('delete_filename')).then(
-lambda: 'grammars/', None, gradio('delete_root')).then(
-lambda: gr.update(visible=True), None, gradio('file_deleter'))

+def handle_save_preset_confirm_click(filename, contents):
+try:
+utils.save_file(f"presets/{filename}.yaml", contents)
+available_presets = utils.get_available_presets()
+output = gr.update(choices=available_presets, value=filename),
+except Exception:
+output = gr.update()
+traceback.print_exc()

+return [
+output,
+gr.update(visible=False)
+]


+def handle_save_confirm_click(root, filename, contents):
+try:
+utils.save_file(root + filename, contents)
+except Exception:
+traceback.print_exc()

+return gr.update(visible=False)


+def handle_delete_confirm_click(root, filename):
+try:
+utils.delete_file(root + filename)
+except Exception:
+traceback.print_exc()

+return gr.update(visible=False)


+def handle_save_character_confirm_click(name2, greeting, context, character_picture, filename):
+try:
+chat.save_character(name2, greeting, context, character_picture, filename)
+available_characters = utils.get_available_characters()
+output = gr.update(choices=available_characters, value=filename),
+except Exception:
+output = gr.update()
+traceback.print_exc()

+return [
+output,
+gr.update(visible=False)
+]


+def handle_delete_character_confirm_click(character):
+try:
+index = str(utils.get_available_characters().index(character))
+chat.delete_character(character)
+output = chat.update_character_menu_after_deletion(index)
+except Exception:
+output = gr.update()
+traceback.print_exc()

+return [
+output,
+gr.update(visible=False)
+]


+def handle_save_preset_click(state):
+contents = presets.generate_preset_yaml(state)
+return [
+contents,
+"My Preset",
+gr.update(visible=True)
+]


+def handle_delete_preset_click(preset):
+return [
+f"{preset}.yaml",
+"presets/",
+gr.update(visible=True)
+]


+def handle_save_grammar_click(grammar_string):
+return [
+grammar_string,
+"My Fancy Grammar.gbnf",
+"grammars/",
+gr.update(visible=True)
+]


+def handle_delete_grammar_click(grammar_file):
+return [
+grammar_file,
+"grammars/",
+gr.update(visible=True)
+]
@ -66,7 +66,6 @@ def create_ui():
ui.create_refresh_button(shared.gradio['model_menu'], lambda: None, lambda: {'choices': utils.get_available_models()}, 'refresh-button', interactive=not mu)
shared.gradio['load_model'] = gr.Button("Load", visible=not shared.settings['autoload_model'], elem_classes='refresh-button', interactive=not mu)
shared.gradio['unload_model'] = gr.Button("Unload", elem_classes='refresh-button', interactive=not mu)
-shared.gradio['reload_model'] = gr.Button("Reload", elem_classes='refresh-button', interactive=not mu)
shared.gradio['save_model_settings'] = gr.Button("Save settings", elem_classes='refresh-button', interactive=not mu)

with gr.Column():
@ -95,7 +94,7 @@ def create_ui():
shared.gradio['hqq_backend'] = gr.Dropdown(label="hqq_backend", choices=["PYTORCH", "PYTORCH_COMPILE", "ATEN"], value=shared.args.hqq_backend)
shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=256, value=shared.args.n_gpu_layers, info='Must be set to more than 0 for your GPU to be used.')
shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=shared.settings['truncation_length_max'], step=256, label="n_ctx", value=shared.args.n_ctx, info='Context length. Try lowering this if you run out of memory while loading the model.')
-shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='List of proportions to split the model across multiple GPUs. Example: 18,17')
+shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='List of proportions to split the model across multiple GPUs. Example: 60,40')
shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, step=1, value=shared.args.n_batch)
shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=256, value=shared.args.threads)
shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=256, value=shared.args.threads_batch)
@ -104,9 +103,9 @@ def create_ui():
shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=shared.settings['truncation_length_max'], step=256, info='Context length. Try lowering this if you run out of memory while loading the model.', value=shared.args.max_seq_len)
with gr.Blocks():
-shared.gradio['alpha_value'] = gr.Slider(label='alpha_value', minimum=1, maximum=8, step=0.05, info='Positional embeddings alpha factor for NTK RoPE scaling. Recommended values (NTKv1): 1.75 for 1.5x context, 2.5 for 2x context. Use either this or compress_pos_emb, not both.', value=shared.args.alpha_value)
+shared.gradio['alpha_value'] = gr.Number(label='alpha_value', value=shared.args.alpha_value, precision=2, info='Positional embeddings alpha factor for NTK RoPE scaling. Recommended values (NTKv1): 1.75 for 1.5x context, 2.5 for 2x context. Use either this or compress_pos_emb, not both.')
-shared.gradio['rope_freq_base'] = gr.Slider(label='rope_freq_base', minimum=0, maximum=20000000, step=1000, info='If greater than 0, will be used instead of alpha_value. Those two are related by rope_freq_base = 10000 * alpha_value ^ (64 / 63)', value=shared.args.rope_freq_base)
+shared.gradio['rope_freq_base'] = gr.Number(label='rope_freq_base', value=shared.args.rope_freq_base, precision=0, info='Positional embeddings frequency base for NTK RoPE scaling. Related to alpha_value by rope_freq_base = 10000 * alpha_value ^ (64 / 63). 0 = from model.')
-shared.gradio['compress_pos_emb'] = gr.Slider(label='compress_pos_emb', minimum=1, maximum=8, step=0.1, info='Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.', value=shared.args.compress_pos_emb)
+shared.gradio['compress_pos_emb'] = gr.Number(label='compress_pos_emb', value=shared.args.compress_pos_emb, precision=0, info='Positional embeddings compression factor. Should be set to (context length) / (model\'s original context length). Equal to 1/rope_freq_scale.')

shared.gradio['autogptq_info'] = gr.Markdown('ExLlamav2_HF is recommended over AutoGPTQ for models derived from Llama.')

@ -118,7 +117,7 @@ def create_ui():
shared.gradio['use_eager_attention'] = gr.Checkbox(label="use_eager_attention", value=shared.args.use_eager_attention, info='Set attn_implementation= eager while loading the model.')
shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
-shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
+shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.')
shared.gradio['cache_8bit'] = gr.Checkbox(label="cache_8bit", value=shared.args.cache_8bit, info='Use 8-bit cache to save VRAM.')
shared.gradio['cache_4bit'] = gr.Checkbox(label="cache_4bit", value=shared.args.cache_4bit, info='Use Q4 cache to save VRAM.')
shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming_llm", value=shared.args.streaming_llm, info='(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
@ -188,39 +187,24 @@ def create_ui():

def create_event_handlers():
-shared.gradio['loader'].change(loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params()))
+shared.gradio['loader'].change(loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params()), show_progress=False)

# In this event handler, the interface state is read and updated
# with the model defaults (if any), and then the model is loaded
# unless "autoload_model" is unchecked
shared.gradio['model_menu'].change(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-apply_model_settings_to_state, gradio('model_menu', 'interface_state'), gradio('interface_state')).then(
-ui.apply_interface_values, gradio('interface_state'), gradio(ui.list_interface_input_elements()), show_progress=False).then(
-update_model_parameters, gradio('interface_state'), None).then(
+handle_load_model_event_initial, gradio('model_menu', 'interface_state'), gradio(ui.list_interface_input_elements()) + gradio('interface_state'), show_progress=False).then(
load_model_wrapper, gradio('model_menu', 'loader', 'autoload_model'), gradio('model_status'), show_progress=False).success(
-update_truncation_length, gradio('truncation_length', 'interface_state'), gradio('truncation_length')).then(
-lambda x: x, gradio('loader'), gradio('filter_by_loader'))
+handle_load_model_event_final, gradio('truncation_length', 'loader', 'interface_state'), gradio('truncation_length', 'filter_by_loader'), show_progress=False)

shared.gradio['load_model'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
update_model_parameters, gradio('interface_state'), None).then(
partial(load_model_wrapper, autoload=True), gradio('model_menu', 'loader'), gradio('model_status'), show_progress=False).success(
-update_truncation_length, gradio('truncation_length', 'interface_state'), gradio('truncation_length')).then(
-lambda x: x, gradio('loader'), gradio('filter_by_loader'))
+handle_load_model_event_final, gradio('truncation_length', 'loader', 'interface_state'), gradio('truncation_length', 'filter_by_loader'), show_progress=False)

-shared.gradio['reload_model'].click(
-unload_model, None, None).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-update_model_parameters, gradio('interface_state'), None).then(
-partial(load_model_wrapper, autoload=True), gradio('model_menu', 'loader'), gradio('model_status'), show_progress=False).success(
-update_truncation_length, gradio('truncation_length', 'interface_state'), gradio('truncation_length')).then(
-lambda x: x, gradio('loader'), gradio('filter_by_loader'))

-shared.gradio['unload_model'].click(
-unload_model, None, None).then(
-lambda: "Model unloaded", None, gradio('model_status'))

+shared.gradio['unload_model'].click(handle_unload_model_click, None, gradio('model_status'), show_progress=False)
shared.gradio['save_model_settings'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
save_model_settings, gradio('model_menu', 'interface_state'), gradio('model_status'), show_progress=False)
@ -353,3 +337,20 @@ def update_truncation_length(current_length, state):
return state['n_ctx']

return current_length


+def handle_load_model_event_initial(model, state):
+state = apply_model_settings_to_state(model, state)
+output = ui.apply_interface_values(state)
+update_model_parameters(state)
+return output + [state]


+def handle_load_model_event_final(truncation_length, loader, state):
+truncation_length = update_truncation_length(truncation_length, state)
+return [truncation_length, loader]


+def handle_unload_model_click():
+unload_model()
+return "Model unloaded"
@ -7,6 +7,7 @@ from modules.text_generation import (
get_token_ids,
stop_everything_event
)
+from modules.ui_default import handle_delete_prompt, handle_save_prompt
from modules.utils import gradio

inputs = ('textbox-notebook', 'interface_state')
@ -66,38 +67,32 @@ def create_event_handlers():
lambda x: x, gradio('textbox-notebook'), gradio('last_input-notebook')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')

shared.gradio['textbox-notebook'].submit(
lambda x: x, gradio('textbox-notebook'), gradio('last_input-notebook')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')

-shared.gradio['Undo'].click(lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False)
-shared.gradio['markdown_render-notebook'].click(lambda x: x, gradio('textbox-notebook'), gradio('markdown-notebook'), queue=False)
shared.gradio['Regenerate-notebook'].click(
lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
-ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None).then(
None, None, None, js=f'() => {{{ui.audio_notification_js}}}')

+shared.gradio['Undo'].click(
+lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False).then(
+lambda state, text: state.update({'textbox-notebook': text}), gradio('interface_state', 'textbox-notebook'), None)

+shared.gradio['markdown_render-notebook'].click(lambda x: x, gradio('textbox-notebook'), gradio('markdown-notebook'), queue=False)
shared.gradio['Stop-notebook'].click(stop_everything_event, None, None, queue=False)
shared.gradio['prompt_menu-notebook'].change(load_prompt, gradio('prompt_menu-notebook'), gradio('textbox-notebook'), show_progress=False)
-shared.gradio['save_prompt-notebook'].click(
-lambda x: x, gradio('textbox-notebook'), gradio('save_contents')).then(
-lambda: 'prompts/', None, gradio('save_root')).then(
-lambda: utils.current_time() + '.txt', None, gradio('save_filename')).then(
-lambda: gr.update(visible=True), None, gradio('file_saver'))
+shared.gradio['save_prompt-notebook'].click(handle_save_prompt, gradio('textbox-notebook'), gradio('save_contents', 'save_filename', 'save_root', 'file_saver'), show_progress=False)
+shared.gradio['delete_prompt-notebook'].click(handle_delete_prompt, gradio('prompt_menu-notebook'), gradio('delete_filename', 'delete_root', 'file_deleter'), show_progress=False)

-shared.gradio['delete_prompt-notebook'].click(
-lambda: 'prompts/', None, gradio('delete_root')).then(
-lambda x: x + '.txt', gradio('prompt_menu-notebook'), gradio('delete_filename')).then(
-lambda: gr.update(visible=True), None, gradio('file_deleter'))

shared.gradio['textbox-notebook'].input(lambda x: f"<span>{count_tokens(x)}</span>", gradio('textbox-notebook'), gradio('token-counter-notebook'), show_progress=False)
shared.gradio['get_logits-notebook'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
@ -102,10 +102,16 @@ def create_ui(default_preset):

def create_event_handlers():
shared.gradio['filter_by_loader'].change(loaders.blacklist_samplers, gradio('filter_by_loader', 'dynamic_temperature'), gradio(loaders.list_all_samplers()), show_progress=False)
-shared.gradio['preset_menu'].change(presets.load_preset_for_ui, gradio('preset_menu', 'interface_state'), gradio('interface_state') + gradio(presets.presets_params()))
-shared.gradio['random_preset'].click(presets.random_preset, gradio('interface_state'), gradio('interface_state') + gradio(presets.presets_params()))
-shared.gradio['grammar_file'].change(load_grammar, gradio('grammar_file'), gradio('grammar_string'))
-shared.gradio['dynamic_temperature'].change(lambda x: [gr.update(visible=x)] * 3, gradio('dynamic_temperature'), gradio('dynatemp_low', 'dynatemp_high', 'dynatemp_exponent'))
+shared.gradio['preset_menu'].change(
+ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+presets.load_preset_for_ui, gradio('preset_menu', 'interface_state'), gradio('interface_state') + gradio(presets.presets_params()), show_progress=False)

+shared.gradio['random_preset'].click(
+ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+presets.random_preset, gradio('interface_state'), gradio('interface_state') + gradio(presets.presets_params()), show_progress=False)

+shared.gradio['grammar_file'].change(load_grammar, gradio('grammar_file'), gradio('grammar_string'), show_progress=False)
+shared.gradio['dynamic_temperature'].change(lambda x: [gr.update(visible=x)] * 3, gradio('dynamic_temperature'), gradio('dynatemp_low', 'dynatemp_high', 'dynatemp_exponent'), show_progress=False)


def get_truncation_length():
@ -35,15 +35,22 @@ def create_ui():
None, None, None, js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;padding-top:20%;margin:0;height:100vh;color:lightgray;text-align:center;background:var(--body-background-fill)">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')

shared.gradio['toggle_dark_mode'].click(
-None, None, None, js='() => {document.getElementsByTagName("body")[0].classList.toggle("dark")}').then(
-lambda x: 'dark' if x == 'light' else 'light', gradio('theme_state'), gradio('theme_state'))
+lambda x: 'dark' if x == 'light' else 'light', gradio('theme_state'), gradio('theme_state')).then(
+None, None, None, js=f'() => {{{ui.dark_theme_js}; toggleDarkMode()}}')

shared.gradio['save_settings'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-ui.save_settings, gradio('interface_state', 'preset_menu', 'extensions_menu', 'show_controls', 'theme_state'), gradio('save_contents')).then(
-lambda: './', None, gradio('save_root')).then(
-lambda: 'settings.yaml', None, gradio('save_filename')).then(
-lambda: gr.update(visible=True), None, gradio('file_saver'))
+handle_save_settings, gradio('interface_state', 'preset_menu', 'extensions_menu', 'show_controls', 'theme_state'), gradio('save_contents', 'save_filename', 'save_root', 'file_saver'), show_progress=False)

+def handle_save_settings(state, preset, extensions, show_controls, theme):
+contents = ui.save_settings(state, preset, extensions, show_controls, theme)
+return [
+contents,
+"settings.yaml",
+"./",
+gr.update(visible=True)
+]


def set_interface_arguments(extensions, bool_active):
@ -95,11 +95,10 @@ def get_available_presets():


def get_available_prompts():
-prompts = []
-files = set((k.stem for k in Path('prompts').glob('*.txt')))
-prompts += sorted([k for k in files if re.match('^[0-9]', k)], key=natural_keys, reverse=True)
-prompts += sorted([k for k in files if re.match('^[^0-9]', k)], key=natural_keys)
-prompts += ['None']
+prompt_files = list(Path('prompts').glob('*.txt'))
+sorted_files = sorted(prompt_files, key=lambda x: x.stat().st_mtime, reverse=True)
+prompts = [file.stem for file in sorted_files]
+prompts.append('None')
return prompts

@@ -35,22 +35,22 @@ sse-starlette==1.6.5
 tiktoken

 # llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

 # llama-cpp-python (CUDA, no tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

 # llama-cpp-python (CUDA, tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

 # CUDA wheels
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7+cu121.torch2.2.2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
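Each wheel line above ends with a PEP 508 environment marker; pip installs only the lines whose marker evaluates to true for the running interpreter, which is how one requirements file can serve several platforms and Python versions. A short sketch of that evaluation using the packaging library (a standard pip dependency; the marker string is copied from the requirements above):

from packaging.markers import Marker

marker = Marker('platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"')
# True only on 64-bit Linux running Python 3.11; on any other platform the
# corresponding wheel line is simply skipped by pip.
print(marker.evaluate())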
@@ -32,14 +32,14 @@ sse-starlette==1.6.5
 tiktoken

 # llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

 # AMD wheels
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.82+rocm5.6.1-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.82+rocm5.6.1-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.83+rocm5.6.1-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.83+rocm5.6.1-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7+rocm5.6.torch2.2.2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7+rocm5.6.torch2.2.2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"
@@ -32,10 +32,10 @@ sse-starlette==1.6.5
 tiktoken

 # llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

 # AMD wheels
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7+rocm5.6.torch2.2.2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
@@ -32,8 +32,8 @@ sse-starlette==1.6.5
 tiktoken

 # Mac wheels
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7-py3-none-any.whl
@@ -32,10 +32,10 @@ sse-starlette==1.6.5
 tiktoken

 # Mac wheels
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.82-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.83-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7-py3-none-any.whl
@@ -32,7 +32,7 @@ sse-starlette==1.6.5
 tiktoken

 # llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
@@ -32,7 +32,7 @@ sse-starlette==1.6.5
 tiktoken

 # llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
@@ -35,22 +35,22 @@ sse-starlette==1.6.5
 tiktoken

 # llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.82+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.83+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

 # llama-cpp-python (CUDA, no tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121avx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.82+cu121avx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121avx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.83+cu121avx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

 # llama-cpp-python (CUDA, tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121avx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.82+cu121avx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121avx-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.83+cu121avx-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

 # CUDA wheels
 https://github.com/oobabooga/exllamav2/releases/download/v0.1.7/exllamav2-0.1.7+cu121.torch2.2.2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
19
server.py
@@ -146,11 +146,21 @@ def create_interface():
     ui_model_menu.create_event_handlers()

     # Interface launch events
-    shared.gradio['interface'].load(None, None, None, js=f"() => {{if ({str(shared.settings['dark_theme']).lower()}) {{ document.getElementsByTagName('body')[0].classList.add('dark'); }} }}")
-    shared.gradio['interface'].load(None, None, None, js=f"() => {{{js}}}")
-    shared.gradio['interface'].load(None, gradio('show_controls'), None, js=f'(x) => {{{ui.show_controls_js}; toggle_controls(x)}}')
+    shared.gradio['interface'].load(
+        None,
+        gradio('show_controls'),
+        None,
+        js=f"""(x) => {{
+            if ({str(shared.settings['dark_theme']).lower()}) {{
+                document.getElementsByTagName('body')[0].classList.add('dark');
+            }}
+            {js}
+            {ui.show_controls_js}
+            toggle_controls(x);
+        }}"""
+    )

     shared.gradio['interface'].load(partial(ui.apply_interface_values, {}, use_persistent=True), None, gradio(ui.list_interface_input_elements()), show_progress=False)
     shared.gradio['interface'].load(chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))

     extensions_module.create_extensions_tabs()  # Extensions tabs
     extensions_module.create_extensions_block()  # Extensions block
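The consolidated event above relies on Gradio's ability to run a client-side js callback on page load, with server-side settings interpolated through a Python f-string. A standalone sketch of that technique (the dark_theme flag is hypothetical; it assumes only the gradio package):

import gradio as gr

dark_theme = True  # hypothetical setting baked into the page's startup JavaScript

with gr.Blocks() as demo:
    gr.Markdown("Hello")
    # The js= callback runs in the browser when the page loads; the f-string
    # injects the Python-side flag into that JavaScript before it is sent.
    demo.load(None, None, None, js=f"() => {{ if ({str(dark_theme).lower()}) {{ document.body.classList.add('dark'); }} }}")

demo.launch()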
@@ -169,6 +179,7 @@ def create_interface():
     ssl_verify=False if (shared.args.ssl_keyfile or shared.args.ssl_certfile) else True,
     ssl_keyfile=shared.args.ssl_keyfile,
     ssl_certfile=shared.args.ssl_certfile,
+    root_path=shared.args.subpath,
     allowed_paths=["cache", "css", "extensions", "js"]
 )
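The new root_path launch argument is what lets the UI be served behind a reverse proxy under a subpath. A minimal sketch of the idea (the /textgen path and the proxy setup are hypothetical):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Served under a subpath")

# With a proxy forwarding /textgen/ to this app, root_path makes Gradio
# generate asset and API URLs that include the prefix.
demo.launch(root_path="/textgen")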
@@ -1,7 +1,7 @@
 dark_theme: true
 show_controls: true
 start_with: ''
-mode: chat
+mode: chat-instruct
 chat_style: cai-chat
 prompt-default: QA
 prompt-notebook: QA
4
training/formats/ChatML-format.json
Normal file
@@ -0,0 +1,4 @@
+{
+    "instruction,output": "<|im_start|>system\n<|im_end|>\n<|im_start|>user\n%instruction%<|im_end|>\n<|im_start|>assistant\n%output%<|im_end|>",
+    "instruction,input,output": "<|im_start|>system\n<|im_end|>\n<|im_start|>user\n%instruction%: %input%<|im_end|>\n<|im_start|>assistant\n%output%<|im_end|>"
+}
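Training format files like this map dataset columns to a prompt template: %instruction%, %input% and %output% are placeholders filled from each dataset row. A short illustrative sketch of that substitution (not the project's training code; the sample row is made up):

template = (
    "<|im_start|>system\n<|im_end|>\n"
    "<|im_start|>user\n%instruction%<|im_end|>\n"
    "<|im_start|>assistant\n%output%<|im_end|>"
)

row = {"instruction": "Name a prime number.", "output": "7"}
text = template
for key, value in row.items():
    # Replace each %column% placeholder with the row's value for that column.
    text = text.replace(f"%{key}%", value)

print(text)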