commit 2dc8db8aa4
mirror of https://github.com/oobabooga/text-generation-webui.git

css/main.css (21 changed lines)
@@ -195,8 +195,9 @@ button {
  border: var(--input-border-width) solid var(--input-border-color) !important;
}

-.file-saver > :first-child > :nth-child(2) {
+.file-saver > :first-child > :last-child {
  background: var(--block-background-fill);
+  justify-content: flex-end;
}

.checkboxgroup-table label {
@@ -652,14 +653,24 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
/* ----------------------------------------------
  Past chats menus
---------------------------------------------- */
#past-chats-row {
  margin-bottom: calc( -1 * var(--layout-gap) );
}

#rename-row label {
  margin-top: var(--layout-gap);
}

/* ----------------------------------------------
  Past chat histories in a side bar on desktop
---------------------------------------------- */
@media screen and (width >= 1327px) {
  #past-chats-row {
    position: absolute;
    top: 16px;
    left: 0;
    width: calc(0.5*(var(--document-width) - 880px - 120px - 16px*2));
    max-width: 300px;
    margin-left: calc(-0.5*(var(--document-width) - 880px - 14px - 16px * 2));
  }
}

/* ----------------------------------------------
  Keep dropdown menus above errored components
---------------------------------------------- */
@@ -21,7 +21,7 @@ These were obtained after a blind contest called "Preset Arena" where hundreds o

A key takeaway is that the best presets are:

-* **For Instruct**: Divine Intellect, Big O, simple-1, Space Alien, StarChat, Titanic, tfs-with-top-a, Asterism, Contrastive Search (only works for the Transformers loader at the moment).
+* **For Instruct**: Divine Intellect, Big O, simple-1.
* **For Chat**: Midnight Enigma, Yara, Shortwave.

The other presets are:
@@ -54,8 +54,7 @@ For more information about the parameters, the [transformers documentation](http
* **mirostat_mode**: Activates the Mirostat sampling technique. It aims to control perplexity during sampling. See the [paper](https://arxiv.org/abs/2007.14966).
* **mirostat_tau**: No idea, see the paper for details. According to the Preset Arena, 8 is a good value.
* **mirostat_eta**: No idea, see the paper for details. According to the Preset Arena, 0.1 is a good value.
-* **dynamic_temperature_low**: The lower bound for temperature in Dynamic Temperature. Only used when "dynamic_temperature" is checked.
-* **dynamic_temperature**: Activates Dynamic Temperature. This modifies temperature to range between "dynamic_temperature_low" (minimum) and "temperature" (maximum), with an entropy-based scaling.
+* **dynamic_temperature**: Activates Dynamic Temperature. This modifies temperature to range between "dynatemp_low" (minimum) and "dynatemp_high" (maximum), with an entropy-based scaling. The steepness of the curve is controlled by "dynatemp_exponent".
* **temperature_last**: Makes temperature the last sampler instead of the first. With this, you can remove low probability tokens with a sampler like min_p and then use a high temperature to make the model creative without losing coherency.
* **do_sample**: When unchecked, sampling is entirely disabled, and greedy decoding is used instead (the most likely token is always picked).
* **Seed**: Set the Pytorch seed to this number. Note that some loaders do not use Pytorch (notably llama.cpp), and others are not deterministic (notably ExLlama v1 and v2). For these loaders, the seed has no effect.
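To make the entropy-based scaling described above concrete, here is a minimal sketch of the idea. The standalone helper name `dynatemp_scale` is assumed for illustration only; the project's actual implementation is the `TemperatureLogitsWarperWithDynatemp` class changed further down in this commit and differs in detail. Confident (low-entropy) token distributions get a temperature near `dynatemp_low`, flat ones near `dynatemp_high`, with `dynatemp_exponent` shaping the curve.

```python
import torch

def dynatemp_scale(logits: torch.Tensor, dynatemp_low: float = 0.5,
                   dynatemp_high: float = 1.5, dynatemp_exponent: float = 1.0) -> torch.Tensor:
    # Entropy of the current token distribution: low when the model is confident.
    probs = torch.softmax(logits, dim=-1)
    entropy = -torch.sum(probs * torch.log(probs + 1e-10), dim=-1)
    # Normalize by the maximum possible entropy (a uniform distribution over the vocabulary).
    max_entropy = torch.log(torch.tensor(float(logits.shape[-1])))
    normalized = (entropy / max_entropy) ** dynatemp_exponent
    # Map [0, 1] onto [dynatemp_low, dynatemp_high] and rescale the logits.
    temperature = dynatemp_low + (dynatemp_high - dynatemp_low) * normalized
    return logits / temperature

# A peaked distribution is divided by a temperature near dynatemp_low,
# a flat one by a temperature near dynatemp_high.
print(dynatemp_scale(torch.tensor([8.0, 1.0, 0.5, 0.1])))
print(dynatemp_scale(torch.tensor([1.0, 1.0, 1.0, 1.0])))
```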
@@ -108,7 +108,7 @@ def ui():
    gr.HTML(value="<style>" + generate_css() + "</style>")
    with gr.Row():
        filter_box = gr.Textbox(label='', placeholder='Filter', lines=1, max_lines=1, container=False, elem_id='gallery-filter-box')
-        gr.ClearButton(filter_box, value='🗑️', elem_classes='refresh-button')
+        gr.ClearButton(filter_box, value='Clear', elem_classes='refresh-button')
        update = gr.Button("Refresh", elem_classes='refresh-button')

    gallery = gr.Dataset(
@@ -9,7 +9,9 @@ class GenerationOptions(BaseModel):
    preset: str | None = Field(default=None, description="The name of a file under text-generation-webui/presets (without the .yaml extension). The sampling parameters that get overwritten by this option are the keys in the default_preset() function in modules/presets.py.")
    min_p: float = 0
    dynamic_temperature: bool = False
-    dynamic_temperature_low: float = 0.1
+    dynatemp_low: float = 1
+    dynatemp_high: float = 1
+    dynatemp_exponent: float = 1
    top_k: int = 0
    repetition_penalty: float = 1
    repetition_penalty_range: int = 1024
@@ -4,13 +4,12 @@ instruction_template: |-
        {{- message['content'] -}}
      {%- else -%}
        {%- if message['role'] == 'user' -%}
-          {{-'[INST] ' + message['content'] + ' [/INST]'-}}
+          {{-' [INST] ' + message['content'].rstrip() + ' [/INST] '-}}
        {%- else -%}
-          {{-'' + message['content'] + '</s> ' -}}
+          {{-'' + message['content'] + '</s>' -}}
        {%- endif -%}
      {%- endif -%}
    {%- endfor -%}
    {%- if add_generation_prompt -%}
      {{-''-}}
    {%- endif -%}

  {%- endif -%}
js/main.js (56 changed lines)
@@ -37,6 +37,7 @@ document.querySelector(".header_bar").addEventListener("click", function(event)
//------------------------------------------------
// Keyboard shortcuts
//------------------------------------------------
+let previousTabId = "chat-tab-button";
document.addEventListener("keydown", function(event) {

  // Stop generation on Esc pressed
@@ -97,6 +98,19 @@ document.addEventListener("keydown", function(event) {
    document.getElementById("Impersonate").click();
  }

+  // Switch between tabs on Tab
+  else if (!event.ctrlKey && !event.shiftKey && event.key === "Tab") {
+    event.preventDefault();
+    var parametersButton = document.getElementById("parameters-button");
+    var parentContainer = parametersButton.parentNode;
+    var selectedChild = parentContainer.querySelector(".selected");
+    if (selectedChild.id == "parameters-button") {
+      document.getElementById(previousTabId).click();
+    } else {
+      previousTabId = selectedChild.id;
+      parametersButton.click();
+    }
+  }
});

//------------------------------------------------
@@ -248,7 +262,7 @@ button.addEventListener("click", function () {
    hideMenu();
  }
  else {
    showMenu();
  }
});
@@ -371,3 +385,43 @@ new ResizeObserver(updateCssProperties)
  .observe(document.querySelector("#chat-input textarea"));

window.addEventListener("resize", updateCssProperties);
+
+//------------------------------------------------
+// Keep track of the display width to position the past
+// chats dropdown on desktop
+//------------------------------------------------
+function updateDocumentWidth() {
+  var updatedWidth = window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
+  document.documentElement.style.setProperty("--document-width", updatedWidth + "px");
+}
+
+updateDocumentWidth();
+window.addEventListener("resize", updateDocumentWidth);
+
+//------------------------------------------------
+// Focus on the rename text area when it becomes visible
+//------------------------------------------------
+const renameTextArea = document.getElementById("rename-row").querySelector("textarea");
+
+function respondToVisibility(element, callback) {
+  var options = {
+    root: document.documentElement,
+  };
+
+  var observer = new IntersectionObserver((entries, observer) => {
+    entries.forEach(entry => {
+      callback(entry.intersectionRatio > 0);
+    });
+  }, options);
+
+  observer.observe(element);
+}
+
+
+function handleVisibilityChange(isVisible) {
+  if (isVisible) {
+    renameTextArea.focus();
+  }
+}
+
+respondToVisibility(renameTextArea, handleVisibilityChange);
@@ -3,7 +3,7 @@ def get_alpha_value(alpha, base):
    Gets alpha_value from alpha_value and rope_freq_base
    '''
    if base > 0:
-        return (base/10000.) ** (63/64.)
+        return (base / 10000.) ** (63 / 64.)
    else:
        return alpha

@@ -15,4 +15,4 @@ def get_rope_freq_base(alpha, base):
    if base > 0:
        return base
    else:
-        return 10000 * alpha ** (64/63.)
+        return 10000 * alpha ** (64 / 63.)
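The two helpers above (only reformatted here) are inverse conversions between the NTK alpha value and the RoPE frequency base: alpha = (base / 10000)^(63/64) and base = 10000 * alpha^(64/63). A quick round-trip check, as a hypothetical standalone snippet rather than part of the commit:

```python
def get_alpha_value(alpha, base):
    # Derive alpha from an explicit frequency base, if one is given.
    return (base / 10000.) ** (63 / 64.) if base > 0 else alpha

def get_rope_freq_base(alpha, base):
    # Derive the frequency base from alpha when no explicit base is given.
    return base if base > 0 else 10000 * alpha ** (64 / 63.)

base = get_rope_freq_base(alpha=2.5, base=0)   # ~25365
alpha = get_alpha_value(alpha=0, base=base)    # ~2.5, recovering the input
print(base, alpha)
```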
@@ -14,6 +14,7 @@ from jinja2.sandbox import ImmutableSandboxedEnvironment
from PIL import Image

import modules.shared as shared
+from modules import utils
from modules.extensions import apply_extensions
from modules.html_generator import chat_html_wrapper, make_thumbnail
from modules.logging_colors import logger
@@ -517,6 +518,35 @@ def load_latest_history(state):
    return history


+def load_history_after_deletion(state, idx):
+    '''
+    Loads the latest history for the given character in chat or chat-instruct
+    mode, or the latest instruct history for instruct mode.
+    '''
+
+    if shared.args.multi_user:
+        return start_new_chat(state)
+
+    histories = find_all_histories(state)
+    idx = min(int(idx), len(histories) - 1)
+    idx = max(0, idx)
+
+    if len(histories) > 0:
+        history = load_history(histories[idx], state['character_menu'], state['mode'])
+    else:
+        history = start_new_chat(state)
+        histories = find_all_histories(state)
+
+    return history, gr.update(choices=histories, value=histories[idx])
+
+
+def update_character_menu_after_deletion(idx):
+    characters = utils.get_available_characters()
+    idx = min(int(idx), len(characters) - 1)
+    idx = max(0, idx)
+    return gr.update(choices=characters, value=characters[idx])
+
+
def load_history(unique_id, character, mode):
    p = get_history_file_path(unique_id, character, mode)
@@ -156,7 +156,9 @@ def transformers_samplers():
        'temperature',
        'temperature_last',
        'dynamic_temperature',
-        'dynamic_temperature_low',
+        'dynatemp_low',
+        'dynatemp_high',
+        'dynatemp_exponent',
        'top_p',
        'min_p',
        'top_k',

@@ -223,7 +225,9 @@ loaders_samplers = {
        'temperature',
        'temperature_last',
        'dynamic_temperature',
-        'dynamic_temperature_low',
+        'dynatemp_low',
+        'dynatemp_high',
+        'dynatemp_exponent',
        'top_p',
        'min_p',
        'top_k',

@@ -277,7 +281,9 @@ loaders_samplers = {
        'temperature',
        'temperature_last',
        'dynamic_temperature',
-        'dynamic_temperature_low',
+        'dynatemp_low',
+        'dynatemp_high',
+        'dynatemp_exponent',
        'top_p',
        'min_p',
        'top_k',
@@ -350,12 +356,20 @@ def list_all_samplers():
    return sorted(all_samplers)


-def blacklist_samplers(loader):
+def blacklist_samplers(loader, dynamic_temperature):
    all_samplers = list_all_samplers()
-    if loader == 'All':
-        return [gr.update(visible=True) for sampler in all_samplers]
-    else:
-        return [gr.update(visible=True) if sampler in loaders_samplers[loader] else gr.update(visible=False) for sampler in all_samplers]
+    output = []
+
+    for sampler in all_samplers:
+        if loader == 'All' or sampler in loaders_samplers[loader]:
+            if sampler.startswith('dynatemp'):
+                output.append(gr.update(visible=dynamic_temperature))
+            else:
+                output.append(gr.update(visible=True))
+        else:
+            output.append(gr.update(visible=False))
+
+    return output


def get_model_types(loader):
@@ -309,14 +309,14 @@ def AutoAWQ_loader(model_name):
    model_dir = Path(f'{shared.args.model_dir}/{model_name}')

    model = AutoAWQForCausalLM.from_quantized(
        quant_path=model_dir,
        max_new_tokens=shared.args.max_seq_len,
        trust_remote_code=shared.args.trust_remote_code,
        fuse_layers=not shared.args.no_inject_fused_attention,
        max_memory=get_max_memory_dict(),
        batch_size=1,
        safetensors=any(model_dir.glob('*.safetensors')),
    )

    return model
@@ -6,6 +6,7 @@ import yaml

from modules import shared
from modules.loaders import loaders_samplers
from modules.logging_colors import logger


def default_preset():

@@ -13,7 +14,9 @@ def default_preset():
        'temperature': 1,
        'temperature_last': False,
        'dynamic_temperature': False,
-        'dynamic_temperature_low': 0.1,
+        'dynatemp_low': 1,
+        'dynatemp_high': 1,
+        'dynatemp_exponent': 1,
        'top_p': 1,
        'min_p': 0,
        'top_k': 0,

@@ -48,11 +51,15 @@ def presets_params():
def load_preset(name):
    generate_params = default_preset()
    if name not in ['None', None, '']:
-        with open(Path(f'presets/{name}.yaml'), 'r') as infile:
-            preset = yaml.safe_load(infile)
+        path = Path(f'presets/{name}.yaml')
+        if path.exists():
+            with open(path, 'r') as infile:
+                preset = yaml.safe_load(infile)

-        for k in preset:
-            generate_params[k] = preset[k]
+            for k in preset:
+                generate_params[k] = preset[k]
+        else:
+            logger.error(f"The preset \"{name}\" does not exist under \"{path}\". Using the default parameters.")

    return generate_params
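With the change above, a preset name that has no matching YAML file under presets/ now logs an error and falls back to default_preset() instead of raising. A hypothetical usage sketch (the preset names are examples: "Midnight Enigma" ships with the project, "Does Not Exist" does not):

```python
from modules.presets import load_preset

params = load_preset('Midnight Enigma')   # existing file: its keys are merged over default_preset()
fallback = load_preset('Does Not Exist')  # missing file: an error is logged, defaults are returned
print(params['temperature'], fallback['temperature'])
```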
@@ -16,7 +16,7 @@ global_scores = None


class TemperatureLogitsWarperWithDynatemp(LogitsWarper):
-    def __init__(self, temperature: float, dynamic_temperature: bool, dynamic_temperature_low: float):
+    def __init__(self, temperature: float, dynamic_temperature: bool, dynatemp_low: float, dynatemp_high: float, dynatemp_exponent: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            except_msg = (
                f"`temperature` (={temperature}) has to be a strictly positive float, otherwise your next token "

@@ -29,7 +29,9 @@ class TemperatureLogitsWarperWithDynatemp(LogitsWarper):

        self.temperature = temperature
        self.dynamic_temperature = dynamic_temperature
-        self.dynamic_temperature_low = dynamic_temperature_low
+        self.dynatemp_low = dynatemp_low
+        self.dynatemp_high = dynatemp_high
+        self.dynatemp_exponent = dynatemp_exponent

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:

@@ -40,9 +42,9 @@ class TemperatureLogitsWarperWithDynatemp(LogitsWarper):

        # Dynamic temperature
        else:
-            min_temp = self.dynamic_temperature_low
-            max_temp = self.temperature
-            exponent_val = 1.0
+            min_temp = self.dynatemp_low
+            max_temp = self.dynatemp_high
+            exponent_val = self.dynatemp_exponent

            # Convert logits to probabilities
            probs = torch.softmax(scores, dim=-1)

@@ -82,7 +84,7 @@ class TemperatureLogitsWarperWithDynatemp(LogitsWarper):

            # max_prob_token_id = torch.argmax(scores, dim=-1)  # Get the token ID with the highest probability
            # max_prob_token = shared.tokenizer.convert_ids_to_tokens(int(max_prob_token_id))  # Convert ID to token
-            # print("--- T=", float(dyn_temp), "token=", max_prob_token, "min=", min_temp, "max=", max_temp)
+            # print("--- T=", float(dyn_temp), "token=", max_prob_token, "min=", min_temp, "max=", max_temp, "exponent=", exponent_val)

        return scores

@@ -292,7 +294,13 @@ def get_logits_warper_patch(self, generation_config):
    warpers = self._get_logits_warper_old(generation_config)
    for i in range(len(warpers)):
        if warpers[i].__class__.__name__ == 'TemperatureLogitsWarper':
-            warpers[i] = TemperatureLogitsWarperWithDynatemp(temperature, generation_config.dynamic_temperature, generation_config.dynamic_temperature_low)
+            warpers[i] = TemperatureLogitsWarperWithDynatemp(
+                temperature,
+                generation_config.dynamic_temperature,
+                generation_config.dynatemp_low,
+                generation_config.dynatemp_high,
+                generation_config.dynatemp_exponent
+            )

    warpers_to_add = LogitsProcessorList()
    min_tokens_to_keep = 2 if generation_config.num_beams > 1 else 1

@@ -361,7 +369,9 @@ def generation_config_init_patch(self, **kwargs):
    self.__init___old(**kwargs)
    self.min_p = kwargs.pop("min_p", 0.0)
    self.dynamic_temperature = kwargs.pop("dynamic_temperature", False)
-    self.dynamic_temperature_low = kwargs.pop("dynamic_temperature_low", 0.1)
+    self.dynatemp_low = kwargs.pop("dynatemp_low", 1)
+    self.dynatemp_high = kwargs.pop("dynatemp_high", 1)
+    self.dynatemp_exponent = kwargs.pop("dynatemp_exponent", 1)
    self.tfs = kwargs.pop("tfs", 1.0)
    self.top_a = kwargs.pop("top_a", 0.0)
    self.mirostat_mode = kwargs.pop("mirostat_mode", 0)
@@ -285,7 +285,7 @@ def get_reply_from_output_ids(output_ids, state, starting_from=0):

def generate_reply_HF(question, original_question, seed, state, stopping_strings=None, is_chat=False):
    generate_params = {}
-    for k in ['max_new_tokens', 'temperature', 'temperature_last', 'dynamic_temperature', 'dynamic_temperature_low', 'top_p', 'min_p', 'top_k', 'repetition_penalty', 'presence_penalty', 'frequency_penalty', 'repetition_penalty_range', 'typical_p', 'tfs', 'top_a', 'guidance_scale', 'penalty_alpha', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'do_sample', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'num_beams', 'length_penalty', 'early_stopping']:
+    for k in ['max_new_tokens', 'temperature', 'temperature_last', 'dynamic_temperature', 'dynatemp_low', 'dynatemp_high', 'dynatemp_exponent', 'top_p', 'min_p', 'top_k', 'repetition_penalty', 'presence_penalty', 'frequency_penalty', 'repetition_penalty_range', 'typical_p', 'tfs', 'top_a', 'guidance_scale', 'penalty_alpha', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'do_sample', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'num_beams', 'length_penalty', 'early_stopping']:
        generate_params[k] = state[k]

    if state['negative_prompt'] != '':
@@ -116,7 +116,9 @@ def list_interface_input_elements():
        'temperature',
        'temperature_last',
        'dynamic_temperature',
-        'dynamic_temperature_low',
+        'dynatemp_low',
+        'dynatemp_high',
+        'dynatemp_exponent',
        'top_p',
        'min_p',
        'top_k',

@@ -207,7 +209,7 @@ def apply_interface_values(state, use_persistent=False):
    return [state[k] if k in state else gr.update() for k in elements]


-def save_settings(state, preset, extensions_list, show_controls):
+def save_settings(state, preset, extensions_list, show_controls, theme_state):
    output = copy.deepcopy(shared.settings)
    exclude = ['name2', 'greeting', 'context', 'turn_template']
    for k in state:

@@ -221,6 +223,7 @@ def save_settings(state, preset, extensions_list, show_controls):
    output['default_extensions'] = extensions_list
    output['seed'] = int(output['seed'])
    output['show_controls'] = show_controls
+    output['dark_theme'] = True if theme_state == 'dark' else False

    # Save extension values in the UI
    for extension_name in extensions_list:

@@ -242,14 +245,11 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
        refresh_method()
        args = refreshed_args() if callable(refreshed_args) else refreshed_args

-        for k, v in args.items():
-            setattr(refresh_component, k, v)
-
        return gr.update(**(args or {}))

    refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)
    refresh_button.click(
-        fn=refresh,
+        fn=lambda: {k: tuple(v) if type(k) is list else v for k, v in refresh().items()},
        inputs=[],
        outputs=[refresh_component]
    )
@@ -56,24 +56,26 @@ def create_ui():
            shared.gradio['Send dummy message'] = gr.Button('Send dummy message')
            shared.gradio['Send dummy reply'] = gr.Button('Send dummy reply')

-        with gr.Row():
-            shared.gradio['Start new chat'] = gr.Button('Start new chat')
-
        with gr.Row():
            shared.gradio['send-chat-to-default'] = gr.Button('Send to default')
            shared.gradio['send-chat-to-notebook'] = gr.Button('Send to notebook')

-        with gr.Row(elem_id='past-chats-row'):
-            shared.gradio['unique_id'] = gr.Dropdown(label='Past chats', elem_classes=['slim-dropdown'], interactive=not mu)
-            shared.gradio['rename_chat'] = gr.Button('Rename', elem_classes='refresh-button', interactive=not mu)
-            shared.gradio['delete_chat'] = gr.Button('🗑️', elem_classes='refresh-button', interactive=not mu)
-            shared.gradio['delete_chat-cancel'] = gr.Button('Cancel', visible=False, elem_classes='refresh-button')
-            shared.gradio['delete_chat-confirm'] = gr.Button('Confirm', variant='stop', visible=False, elem_classes='refresh-button')
+        with gr.Row(elem_id='past-chats-row', elem_classes=['pretty_scrollbar']):
+            with gr.Column():
+                with gr.Row():
+                    shared.gradio['unique_id'] = gr.Dropdown(label='Past chats', elem_classes=['slim-dropdown'], interactive=not mu)
+
+                with gr.Row(elem_id='rename-row'):
+                    shared.gradio['rename_to'] = gr.Textbox(label='Rename to:', placeholder='New name', visible=False, elem_classes=['no-background'])
+                    shared.gradio['rename_to-cancel'] = gr.Button('Cancel', visible=False, elem_classes='refresh-button')
+                    shared.gradio['rename_to-confirm'] = gr.Button('Confirm', visible=False, elem_classes='refresh-button')
+                with gr.Row():
+                    shared.gradio['rename_chat'] = gr.Button('Rename', elem_classes='refresh-button', interactive=not mu)
+                    shared.gradio['delete_chat'] = gr.Button('🗑️', elem_classes='refresh-button', interactive=not mu)
+                    shared.gradio['delete_chat-confirm'] = gr.Button('Confirm', variant='stop', visible=False, elem_classes='refresh-button')
+                    shared.gradio['delete_chat-cancel'] = gr.Button('Cancel', visible=False, elem_classes='refresh-button')
+                    shared.gradio['Start new chat'] = gr.Button('New chat', elem_classes='refresh-button')

-        with gr.Row(elem_id='rename-row'):
-            shared.gradio['rename_to'] = gr.Textbox(label='Rename to:', placeholder='New name', visible=False, elem_classes=['no-background'])
-            shared.gradio['rename_to-confirm'] = gr.Button('Confirm', visible=False, elem_classes='refresh-button')
-            shared.gradio['rename_to-cancel'] = gr.Button('Cancel', visible=False, elem_classes='refresh-button')

        with gr.Row():
            shared.gradio['start_with'] = gr.Textbox(label='Start reply with', placeholder='Sure thing!', value=shared.settings['start_with'])
@@ -246,10 +248,10 @@ def create_event_handlers():
    shared.gradio['delete_chat-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))
    shared.gradio['delete_chat-confirm'].click(
        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+        lambda x, y: str(chat.find_all_histories(x).index(y)), gradio('interface_state', 'unique_id'), gradio('temporary_text')).then(
        chat.delete_history, gradio('unique_id', 'character_menu', 'mode'), None).then(
-        chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
+        chat.load_history_after_deletion, gradio('interface_state', 'temporary_text'), gradio('history', 'unique_id')).then(
        chat.redraw_html, gradio(reload_arr), gradio('display')).then(
-        lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id')).then(
        lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))

    shared.gradio['rename_chat'].click(

@@ -264,6 +266,11 @@ def create_event_handlers():
        lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False).then(
        lambda x, y: gr.update(choices=chat.find_all_histories(x), value=y), gradio('interface_state', 'rename_to'), gradio('unique_id'))

+    shared.gradio['rename_to'].submit(
+        chat.rename_history, gradio('unique_id', 'rename_to', 'character_menu', 'mode'), None).then(
+        lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False).then(
+        lambda x, y: gr.update(choices=chat.find_all_histories(x), value=y), gradio('interface_state', 'rename_to'), gradio('unique_id'))
+
    shared.gradio['load_chat_history'].upload(
        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
        chat.start_new_chat, gradio('interface_state'), gradio('history')).then(
@@ -13,37 +13,37 @@ def create_ui():
        shared.gradio['save_root'] = gr.Textbox(lines=1, label='File folder', info='For reference. Unchangeable.', interactive=False)
        shared.gradio['save_contents'] = gr.Textbox(lines=10, label='File contents')
        with gr.Row():
-            shared.gradio['save_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)
            shared.gradio['save_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+            shared.gradio['save_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)

    # Text file deleter
    with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['file_deleter']:
        shared.gradio['delete_filename'] = gr.Textbox(lines=1, label='File name')
        shared.gradio['delete_root'] = gr.Textbox(lines=1, label='File folder', info='For reference. Unchangeable.', interactive=False)
        with gr.Row():
-            shared.gradio['delete_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop', interactive=not mu)
            shared.gradio['delete_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+            shared.gradio['delete_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop', interactive=not mu)

    # Character saver/deleter
    with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
        shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your characters/ folder with this base filename.')
        with gr.Row():
-            shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)
            shared.gradio['save_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+            shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)

    with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['character_deleter']:
        gr.Markdown('Confirm the character deletion?')
        with gr.Row():
-            shared.gradio['delete_character_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop', interactive=not mu)
            shared.gradio['delete_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+            shared.gradio['delete_character_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop', interactive=not mu)

    # Preset saver
    with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['preset_saver']:
        shared.gradio['save_preset_filename'] = gr.Textbox(lines=1, label='File name', info='The preset will be saved to your presets/ folder with this base filename.')
        shared.gradio['save_preset_contents'] = gr.Textbox(lines=10, label='File contents')
        with gr.Row():
-            shared.gradio['save_preset_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)
            shared.gradio['save_preset_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+            shared.gradio['save_preset_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)


def create_event_handlers():
@@ -64,9 +64,10 @@ def create_event_handlers():
        lambda x: gr.update(choices=utils.get_available_characters(), value=x), gradio('save_character_filename'), gradio('character_menu'))

    shared.gradio['delete_character_confirm'].click(
+        lambda x: str(utils.get_available_characters().index(x)), gradio('character_menu'), gradio('temporary_text')).then(
        chat.delete_character, gradio('character_menu'), None).then(
-        lambda: gr.update(visible=False), None, gradio('character_deleter')).then(
-        lambda: gr.update(choices=(characters := utils.get_available_characters()), value=characters[0]), None, gradio('character_menu'))
+        chat.update_character_menu_after_deletion, gradio('temporary_text'), gradio('character_menu')).then(
+        lambda: gr.update(visible=False), None, gradio('character_deleter'))

    shared.gradio['save_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_saver'))
    shared.gradio['delete_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_deleter'))
@@ -49,8 +49,10 @@ def create_ui(default_preset):
            shared.gradio['mirostat_mode'] = gr.Slider(0, 2, step=1, value=generate_params['mirostat_mode'], label='mirostat_mode', info='mode=1 is for llama.cpp only.')
            shared.gradio['mirostat_tau'] = gr.Slider(0, 10, step=0.01, value=generate_params['mirostat_tau'], label='mirostat_tau')
            shared.gradio['mirostat_eta'] = gr.Slider(0, 1, step=0.01, value=generate_params['mirostat_eta'], label='mirostat_eta')
-            shared.gradio['dynamic_temperature_low'] = gr.Slider(0.01, 5, value=generate_params['dynamic_temperature_low'], step=0.01, label='dynamic_temperature_low', info='Only used when dynamic_temperature is checked.')
            shared.gradio['dynamic_temperature'] = gr.Checkbox(value=generate_params['dynamic_temperature'], label='dynamic_temperature')
+            shared.gradio['dynatemp_low'] = gr.Slider(0.01, 5, value=generate_params['dynatemp_low'], step=0.01, label='dynatemp_low', visible=generate_params['dynamic_temperature'])
+            shared.gradio['dynatemp_high'] = gr.Slider(0.01, 5, value=generate_params['dynatemp_high'], step=0.01, label='dynatemp_high', visible=generate_params['dynamic_temperature'])
+            shared.gradio['dynatemp_exponent'] = gr.Slider(0.01, 5, value=generate_params['dynatemp_exponent'], step=0.01, label='dynatemp_exponent', visible=generate_params['dynamic_temperature'])
            shared.gradio['temperature_last'] = gr.Checkbox(value=generate_params['temperature_last'], label='temperature_last', info='Makes temperature the last sampler instead of the first.')
            shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label='do_sample')
            shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')

@@ -93,10 +95,11 @@ def create_ui(default_preset):


def create_event_handlers():
-    shared.gradio['filter_by_loader'].change(loaders.blacklist_samplers, gradio('filter_by_loader'), gradio(loaders.list_all_samplers()), show_progress=False)
+    shared.gradio['filter_by_loader'].change(loaders.blacklist_samplers, gradio('filter_by_loader', 'dynamic_temperature'), gradio(loaders.list_all_samplers()), show_progress=False)
    shared.gradio['preset_menu'].change(presets.load_preset_for_ui, gradio('preset_menu', 'interface_state'), gradio('interface_state') + gradio(presets.presets_params()))
    shared.gradio['random_preset'].click(presets.random_preset, gradio('interface_state'), gradio('interface_state') + gradio(presets.presets_params()))
    shared.gradio['grammar_file'].change(load_grammar, gradio('grammar_file'), gradio('grammar_string'))
+    shared.gradio['dynamic_temperature'].change(lambda x: [gr.update(visible=x)] * 3, gradio('dynamic_temperature'), gradio('dynatemp_low', 'dynatemp_high', 'dynatemp_exponent'))


def get_truncation_length():
@@ -26,6 +26,7 @@ def create_ui():
        extension_name = gr.Textbox(lines=1, label='Install or update an extension', info='Enter the GitHub URL below and press Enter. For a list of extensions, see: https://github.com/oobabooga/text-generation-webui-extensions ⚠️ WARNING ⚠️ : extensions can execute arbitrary code. Make sure to inspect their source code before activating them.', interactive=not mu)
        extension_status = gr.Markdown()

+    shared.gradio['theme_state'] = gr.Textbox(visible=False, value='dark' if shared.settings['dark_theme'] else 'light')
    extension_name.submit(clone_or_pull_repository, extension_name, extension_status, show_progress=False)

    # Reset interface event

@@ -33,10 +34,13 @@ def create_ui():
        set_interface_arguments, gradio('extensions_menu', 'bool_menu'), None).then(
        lambda: None, None, None, _js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;padding-top:20%;margin:0;height:100vh;color:lightgray;text-align:center;background:var(--body-background-fill)">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')

-    shared.gradio['toggle_dark_mode'].click(lambda: None, None, None, _js='() => {document.getElementsByTagName("body")[0].classList.toggle("dark")}')
+    shared.gradio['toggle_dark_mode'].click(
+        lambda: None, None, None, _js='() => {document.getElementsByTagName("body")[0].classList.toggle("dark")}').then(
+        lambda x: 'dark' if x == 'light' else 'light', gradio('theme_state'), gradio('theme_state'))

    shared.gradio['save_settings'].click(
        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        ui.save_settings, gradio('interface_state', 'preset_menu', 'extensions_menu', 'show_controls'), gradio('save_contents')).then(
+        ui.save_settings, gradio('interface_state', 'preset_menu', 'extensions_menu', 'show_controls', 'theme_state'), gradio('save_contents')).then(
        lambda: './', None, gradio('save_root')).then(
        lambda: 'settings.yaml', None, gradio('save_filename')).then(
        lambda: gr.update(visible=True), None, gradio('file_saver'))
@@ -25,13 +25,13 @@ def save_file(fname, contents):
    rel_path_str = os.path.relpath(abs_path_str, root_folder)
    rel_path = Path(rel_path_str)
    if rel_path.parts[0] == '..':
-        logger.error(f'Invalid file path: {fname}')
+        logger.error(f'Invalid file path: \"{fname}\"')
        return

    with open(abs_path_str, 'w', encoding='utf-8') as f:
        f.write(contents)

-    logger.info(f'Saved {abs_path_str}.')
+    logger.info(f'Saved \"{abs_path_str}\".')


def delete_file(fname):

@@ -44,12 +44,12 @@ def delete_file(fname):
    rel_path_str = os.path.relpath(abs_path_str, root_folder)
    rel_path = Path(rel_path_str)
    if rel_path.parts[0] == '..':
-        logger.error(f'Invalid file path: {fname}')
+        logger.error(f'Invalid file path: \"{fname}\"')
        return

    if rel_path.exists():
        rel_path.unlink()
-        logger.info(f'Deleted {fname}.')
+        logger.info(f'Deleted \"{fname}\".')


def current_time():
@@ -1,4 +0,0 @@
-temperature: 1.68
-top_p: 0.17
-repetition_penalty: 1.02
-top_k: 77

@@ -1,5 +0,0 @@
-dynamic_temperature: true
-dynamic_temperature_low: 0.1
-temperature: 3
-temperature_last: true
-min_p: 0.05

@@ -1,2 +0,0 @@
-mirostat_mode: 2
-mirostat_tau: 8

presets/Null preset.yaml (new file, 1 line)
@@ -0,0 +1 @@
+temperature: 1

@@ -1,4 +0,0 @@
-temperature: 1.31
-top_p: 0.29
-repetition_penalty: 1.09
-top_k: 72

@@ -1,3 +0,0 @@
-temperature: 0.2
-top_p: 0.95
-top_k: 50

@@ -1,5 +0,0 @@
-temperature: 1.01
-top_p: 0.21
-repetition_penalty: 1.21
-encoder_repetition_penalty: 1.07
-top_k: 91

@@ -1,4 +0,0 @@
-temperature: 0.7
-tfs: 0.95
-top_a: 0.2
-repetition_penalty: 1.15
@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11; platform_system != "Darwin" and platform_machine != "x86_64"
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11; platform_system == "Windows" or python_version < "3.10" or python_version > "3.11" or platform_machine != "x86_64"
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11; platform_system == "Windows" or python_version < "3.10" or python_version > "3.11" or platform_machine != "x86_64"
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11; platform_system != "Darwin" and platform_machine != "x86_64"
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*

@@ -4,7 +4,7 @@ datasets
einops
exllamav2==0.0.11
gradio==3.50.*
-hqq==0.1.1.post1
+hqq==0.1.2
lm_eval==0.3.0
markdown
numpy==1.24.*