From f27135bdd3ddeb8b48c2f0e5109d98500f588047 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sun, 28 May 2023 22:42:43 -0300
Subject: [PATCH] Add Eta Sampling preset

Also remove some presets that I do not consider relevant
---
 README.md | 3 +--
 presets/Default.yaml | 6 ------
 presets/MOSS.yaml | 3 ---
 ...stive Search.yaml => Special-Contrastive Search.yaml} | 0
 presets/Special-Eta Sampling.yaml | 2 ++
 presets/Verbose (Beam Search).yaml | 9 ---------
 server.py | 6 +++---
 7 files changed, 6 insertions(+), 23 deletions(-)
 delete mode 100644 presets/Default.yaml
 delete mode 100644 presets/MOSS.yaml
 rename presets/{Contrastive Search.yaml => Special-Contrastive Search.yaml} (100%)
 create mode 100644 presets/Special-Eta Sampling.yaml
 delete mode 100644 presets/Verbose (Beam Search).yaml

diff --git a/README.md b/README.md
index 580323c3..c5d6217b 100644
--- a/README.md
+++ b/README.md
@@ -321,7 +321,7 @@ Out of memory errors? [Check the low VRAM guide](docs/Low-VRAM-guide.md).
 
 Inference settings presets can be created under `presets/` as text files. These files are detected automatically at startup.
 
-By default, 10 presets by NovelAI and KoboldAI are included. These were selected out of a sample of 43 presets after applying a K-Means clustering algorithm and selecting the elements closest to the average of each cluster.
+By default, 10 presets based on NovelAI and KoboldAI presets are included. These were selected out of a sample of 43 presets after applying a K-Means clustering algorithm and selecting the elements closest to the average of each cluster.
 
 [Visualization](https://user-images.githubusercontent.com/112222186/228956352-1addbdb9-2456-465a-b51d-089f462cd385.png)
 
@@ -345,6 +345,5 @@ Before reporting a bug, make sure that you have:
 ## Credits
 
 - Gradio dropdown menu refresh button, code for reloading the interface: https://github.com/AUTOMATIC1111/stable-diffusion-webui
-- Verbose preset: Anonymous 4chan user.
 - NovelAI and KoboldAI presets: https://github.com/KoboldAI/KoboldAI-Client/wiki/Settings-Presets
 - Code for early stopping in chat mode, code for some of the sliders: https://github.com/PygmalionAI/gradio-ui/
diff --git a/presets/Default.yaml b/presets/Default.yaml
deleted file mode 100644
index 0b1b8b7a..00000000
--- a/presets/Default.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-do_sample: true
-top_p: 0.5
-top_k: 40
-temperature: 0.7
-repetition_penalty: 1.2
-typical_p: 1.0
diff --git a/presets/MOSS.yaml b/presets/MOSS.yaml
deleted file mode 100644
index 6eccc945..00000000
--- a/presets/MOSS.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-temperature: 0.7
-top_p: 0.8
-repetition_penalty: 1.02
diff --git a/presets/Contrastive Search.yaml b/presets/Special-Contrastive Search.yaml
similarity index 100%
rename from presets/Contrastive Search.yaml
rename to presets/Special-Contrastive Search.yaml
diff --git a/presets/Special-Eta Sampling.yaml b/presets/Special-Eta Sampling.yaml
new file mode 100644
index 00000000..141618a8
--- /dev/null
+++ b/presets/Special-Eta Sampling.yaml
@@ -0,0 +1,2 @@
+do_sample: true
+eta_cutoff: 3
diff --git a/presets/Verbose (Beam Search).yaml b/presets/Verbose (Beam Search).yaml
deleted file mode 100644
index bd75fca8..00000000
--- a/presets/Verbose (Beam Search).yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-num_beams: 10
-min_length: 200
-length_penalty: 1.4
-no_repeat_ngram_size: 2
-early_stopping: true
-temperature: 0.7
-top_k: 150
-top_p: 0.92
-repetition_penalty: 4.5
diff --git a/server.py b/server.py
index 1bfd3044..15dd90da 100644
--- a/server.py
+++ b/server.py
@@ -91,7 +91,7 @@ def load_preset_values(preset_menu, state, return_dict=False):
         'eta_cutoff': 0,
         'repetition_penalty': 1,
         'encoder_repetition_penalty': 1,
-        'top_k': 50,
+        'top_k': 0,
         'num_beams': 1,
         'penalty_alpha': 0,
         'min_length': 0,
@@ -470,8 +470,8 @@ def create_settings_menus(default_preset):
                     shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p', info='If not set to 1, select tokens with probabilities adding up to less than this number. Higher value = higher range of possible random results.')
                     shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k', info='Similar to top_p, but select instead only the top_k most likely tokens. Higher value = higher range of possible random results.')
                     shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p', info='If not set to 1, select only tokens that are at least this much more likely to appear than random tokens, given the prior text.')
-                    shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff', info='In units of 1e-4')
-                    shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff', info='In units of 1e-4')
+                    shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff', info='In units of 1e-4; a reasonable value is 3. This sets a probability floor below which tokens are excluded from being sampled. Should be used with top_p, top_k, and eta_cutoff set to 0.')
+                    shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff', info='In units of 1e-4; a reasonable value is 3. Should be used with top_p, top_k, and epsilon_cutoff set to 0.')
                 with gr.Column():
                     shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty', info='Exponential penalty factor for repeating prior tokens. 1 means no penalty, higher value = less repetition, lower value = more repetition.')
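
For reference (not part of the patch itself): the new Special-Eta Sampling preset only sets eta_cutoff: 3, and since the slider info says the value is in units of 1e-4, this corresponds to epsilon = 3e-4. The sketch below is a minimal illustration of eta sampling, assuming the adaptive threshold min(epsilon, sqrt(epsilon) * exp(-entropy)) from Hewitt et al. (2022); the eta_sample helper and its NumPy implementation are hypothetical and are not the code path touched by server.py, which delegates generation to the transformers library.

# Illustrative sketch only; eta_sample is an assumed helper, not part of this repo.
import numpy as np

def eta_sample(logits: np.ndarray, eta_cutoff: float = 3.0, rng=None) -> int:
    """Sample one token id after applying an eta cutoff (given in units of 1e-4)."""
    rng = rng or np.random.default_rng()
    epsilon = eta_cutoff * 1e-4                       # eta_cutoff: 3  ->  epsilon = 3e-4
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()                              # softmax over the vocabulary
    entropy = -np.sum(probs * np.log(probs + 1e-12))  # Shannon entropy of the distribution
    eta = min(epsilon, np.sqrt(epsilon) * np.exp(-entropy))  # adaptive truncation threshold
    keep = probs >= eta
    if not keep.any():                                # always keep at least the top token
        keep[np.argmax(probs)] = True
    probs = np.where(keep, probs, 0.0)
    probs /= probs.sum()                              # renormalize over the kept tokens
    return int(rng.choice(len(probs), p=probs))

The intent is that low-probability tokens are dropped more aggressively when the distribution is peaked (low entropy) and less aggressively when it is flat, which is why the preset can leave top_p and top_k disabled.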