Merge pull request #5181 from oobabooga/dev

Merge dev branch
oobabooga 2024-01-05 18:42:30 -03:00 committed by GitHub
commit 8ea3f31601
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 81 additions and 64 deletions

extensions/character_bias/script.py

@@ -18,7 +18,7 @@ with open(bias_file, "r") as f:
 params = {
     "activate": True,
     "bias string": " *I am so happy*",
-    "use custom string": False,
+    "custom string": "",
 }
@@ -44,7 +44,7 @@ def bot_prefix_modifier(string):
     behavior.
     """
     if params['activate']:
-        if params['use custom string']:
+        if params['custom string'].strip() != '':
             return f'{string} {params["custom string"].strip()} '
         else:
             return f'{string} {params["bias string"].strip()} '
@@ -56,8 +56,7 @@ def ui():
     # Gradio elements
     activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
     dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
-    use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown')
-    custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above')
+    custom_string = gr.Textbox(value=params['custom string'], placeholder="Enter custom bias string", label="Custom Character Bias", info='If not empty, will be used instead of the value above')

     # Event functions to update the parameters in the backend
     def update_bias_string(x):
@@ -73,11 +72,3 @@ def ui():
     dropdown_string.change(update_bias_string, dropdown_string, None)
     custom_string.change(update_custom_string, custom_string, None)
     activate.change(lambda x: params.update({"activate": x}), activate, None)
-    use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None)
-
-    # Group elements together depending on the selected option
-    def bias_string_group():
-        if use_custom_string.value:
-            return gr.Group([use_custom_string, custom_string])
-        else:
-            return dropdown_string
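A minimal stand-alone sketch of the new precedence rule (the params copy below mirrors the dict above): a non-empty custom string now wins over the dropdown value, which is what made the old "use custom string" checkbox redundant.

    params = {
        "activate": True,
        "bias string": " *I am so happy*",
        "custom string": "",
    }

    def pick_bias():
        # A non-empty custom string takes precedence over the dropdown value.
        if params['custom string'].strip() != '':
            return params['custom string'].strip()
        return params['bias string'].strip()

    print(pick_bias())                        # *I am so happy*
    params['custom string'] = ' *I whisper*'
    print(pick_bias())                        # *I whisper*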

js/main.js

@@ -193,6 +193,7 @@ for (i = 0; i < slimDropdownElements.length; i++) {
 var buttonsInChat = document.querySelectorAll("#chat-tab:not(.old-ui) #chat-buttons button");
 var button = document.getElementById("hover-element-button");
 var menu = document.getElementById("hover-menu");
+var istouchscreen = (navigator.maxTouchPoints > 0) || "ontouchstart" in document.documentElement;

 function showMenu() {
     menu.style.display = "flex"; // Show the menu
@@ -200,7 +201,9 @@ function showMenu() {
 function hideMenu() {
     menu.style.display = "none"; // Hide the menu
-    document.querySelector("#chat-input textarea").focus();
+    if (!istouchscreen) {
+        document.querySelector("#chat-input textarea").focus(); // Focus on the chat input
+    }
 }

 if (buttonsInChat.length > 0) {
@@ -235,11 +238,18 @@ function isMouseOverButtonOrMenu() {
 }

 button.addEventListener("mouseenter", function () {
-    showMenu();
+    if (!istouchscreen) {
+        showMenu();
+    }
 });

 button.addEventListener("click", function () {
-    showMenu();
+    if (menu.style.display === "flex") {
+        hideMenu();
+    }
+    else {
+        showMenu();
+    }
 });

 // Add event listener for mouseleave on the button

modules/chat.py

@@ -560,17 +560,17 @@ def replace_character_names(text, name1, name2):
 def generate_pfp_cache(character):
-    cache_folder = Path("cache")
+    cache_folder = Path(shared.args.disk_cache_dir)
     if not cache_folder.exists():
         cache_folder.mkdir()

     for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
         if path.exists():
             original_img = Image.open(path)
-            original_img.save(Path('cache/pfp_character.png'), format='PNG')
+            original_img.save(Path(f'{cache_folder}/pfp_character.png'), format='PNG')
             thumb = make_thumbnail(original_img)
-            thumb.save(Path('cache/pfp_character_thumb.png'), format='PNG')
+            thumb.save(Path(f'{cache_folder}/pfp_character_thumb.png'), format='PNG')
             return thumb
@@ -594,8 +594,9 @@ def load_character(character, name1, name2):
     file_contents = open(filepath, 'r', encoding='utf-8').read()
     data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)

-    for path in [Path("cache/pfp_character.png"), Path("cache/pfp_character_thumb.png")]:
+    cache_folder = Path(shared.args.disk_cache_dir)
+    for path in [Path(f"{cache_folder}/pfp_character.png"), Path(f"{cache_folder}/pfp_character_thumb.png")]:
         if path.exists():
             path.unlink()
@@ -713,17 +714,17 @@ def check_tavern_character(img):
 def upload_your_profile_picture(img):
-    cache_folder = Path("cache")
+    cache_folder = Path(shared.args.disk_cache_dir)
     if not cache_folder.exists():
         cache_folder.mkdir()

     if img is None:
-        if Path("cache/pfp_me.png").exists():
-            Path("cache/pfp_me.png").unlink()
+        if Path(f"{cache_folder}/pfp_me.png").exists():
+            Path(f"{cache_folder}/pfp_me.png").unlink()
     else:
         img = make_thumbnail(img)
-        img.save(Path('cache/pfp_me.png'))
-        logger.info('Profile picture saved to "cache/pfp_me.png"')
+        img.save(Path(f'{cache_folder}/pfp_me.png'))
+        logger.info(f'Profile picture saved to "{cache_folder}/pfp_me.png"')

 def generate_character_yaml(name, greeting, context):
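All three functions now resolve the cache folder from `shared.args.disk_cache_dir` instead of a hard-coded "cache". A sketch of the resulting paths, using a hypothetical `Args` stand-in (the real value presumably comes from a --disk-cache-dir command-line flag, with "cache" as the default the old code implies):

    from pathlib import Path

    class Args:
        disk_cache_dir = "cache"  # hypothetical stand-in for shared.args

    cache_folder = Path(Args.disk_cache_dir)
    if not cache_folder.exists():
        cache_folder.mkdir()

    print(Path(f'{cache_folder}/pfp_me.png'))  # cache/pfp_me.png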

modules/html_generator.py

@@ -8,6 +8,7 @@ import markdown
 from PIL import Image, ImageOps

 from modules.utils import get_available_chat_styles
+from modules import shared

 # This is to store the paths to the thumbnails of the profile pictures
 image_cache = {}
@@ -170,7 +171,7 @@ def make_thumbnail(image):
 def get_image_cache(path):
-    cache_folder = Path("cache")
+    cache_folder = Path(shared.args.disk_cache_dir)
     if not cache_folder.exists():
         cache_folder.mkdir()
@@ -178,8 +179,8 @@ def get_image_cache(path):
     if (path in image_cache and mtime != image_cache[path][0]) or (path not in image_cache):
         img = make_thumbnail(Image.open(path))
-        old_p = Path(f'cache/{path.name}_cache.png')
-        p = Path(f'cache/cache_{path.name}.png')
+        old_p = Path(f'{cache_folder}/{path.name}_cache.png')
+        p = Path(f'{cache_folder}/cache_{path.name}.png')
         if old_p.exists():
             old_p.rename(p)
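The invalidation condition in `get_image_cache` reads more easily in isolation. A sketch, assuming `image_cache` maps each source path to an `(mtime, thumbnail path)` tuple as the comment at the top of the file suggests:

    import os

    image_cache = {}  # source path -> (mtime at caching time, thumbnail path)

    def is_stale(path):
        # Rebuild the thumbnail if the file is new or was modified since caching.
        mtime = os.stat(path).st_mtime
        return path not in image_cache or image_cache[path][0] != mtime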

modules/ui.py

@@ -6,6 +6,7 @@ import torch
 import yaml
 from transformers import is_torch_xpu_available

+import extensions
 from modules import shared

 with open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f:
@@ -204,7 +205,7 @@ def apply_interface_values(state, use_persistent=False):
     return [state[k] if k in state else gr.update() for k in elements]

-def save_settings(state, preset, extensions, show_controls):
+def save_settings(state, preset, extensions_list, show_controls):
     output = copy.deepcopy(shared.settings)
     exclude = ['name2', 'greeting', 'context', 'turn_template']
     for k in state:
@@ -215,10 +216,19 @@ def save_settings(state, preset, extensions, show_controls):
     output['prompt-default'] = state['prompt_menu-default']
     output['prompt-notebook'] = state['prompt_menu-notebook']
     output['character'] = state['character_menu']
-    output['default_extensions'] = extensions
+    output['default_extensions'] = extensions_list
     output['seed'] = int(output['seed'])
     output['show_controls'] = show_controls

+    # Save extension values in the UI
+    for extension_name in extensions_list:
+        extension = getattr(extensions, extension_name).script
+        if hasattr(extension, 'params'):
+            params = getattr(extension, 'params')
+            for param in params:
+                _id = f"{extension_name}-{param}"
+                output[_id] = params[param]
+
     return yaml.dump(output, sort_keys=False, width=float("inf"))
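A sketch of the keys the new loop writes into the saved settings, using a hypothetical in-memory dict in place of `extensions.<name>.script.params`:

    output = {}
    extensions_list = ['character_bias']
    fake_params = {'character_bias': {'activate': True, 'custom string': ''}}  # hypothetical

    for extension_name in extensions_list:
        params = fake_params[extension_name]
        for param in params:
            output[f"{extension_name}-{param}"] = params[param]

    print(output)  # {'character_bias-activate': True, 'character_bias-custom string': ''}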

modules/ui_parameters.py

@@ -28,7 +28,7 @@ def create_ui(default_preset):
     with gr.Row():
         with gr.Column():
             shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
-            shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label='temperature')
+            shared.gradio['temperature'] = gr.Slider(0.01, 5, value=generate_params['temperature'], step=0.01, label='temperature')
             shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
             shared.gradio['min_p'] = gr.Slider(0.0, 1.0, value=generate_params['min_p'], step=0.01, label='min_p')
             shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')

one_click.py

@@ -89,6 +89,7 @@ def torch_version():
         torver = [line for line in torch_version_file if '__version__' in line][0].split('__version__ = ')[1].strip("'")
     else:
         from torch import __version__ as torver
+
     return torver
@@ -185,15 +186,28 @@ def install_webui():
         print("Invalid choice. Please try again.")
         choice = input("Input> ").upper()

-    if choice == "N":
-        print_big_message("Once the installation ends, make sure to open CMD_FLAGS.txt with\na text editor and add the --cpu flag.")
+    gpu_choice_to_name = {
+        "A": "NVIDIA",
+        "B": "AMD",
+        "C": "APPLE",
+        "D": "INTEL",
+        "N": "NONE"
+    }
+
+    selected_gpu = gpu_choice_to_name[choice]
+
+    if selected_gpu == "NONE":
+        with open(cmd_flags_path, 'r+') as cmd_flags_file:
+            if "--cpu" not in cmd_flags_file.read():
+                print_big_message("Adding the --cpu flag to CMD_FLAGS.txt.")
+                cmd_flags_file.write("\n--cpu")

     # Find the proper Pytorch installation command
     install_git = "conda install -y -k ninja git"
-    install_pytorch = "python -m pip install torch torchvision torchaudio"
+    install_pytorch = "python -m pip install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* "

     use_cuda118 = "N"
-    if any((is_windows(), is_linux())) and choice == "A":
+    if any((is_windows(), is_linux())) and selected_gpu == "NVIDIA":
         if "USE_CUDA118" in os.environ:
             use_cuda118 = "Y" if os.environ.get("USE_CUDA118", "").lower() in ("yes", "y", "true", "1", "t", "on") else "N"
         else:
@@ -203,29 +217,30 @@ def install_webui():
         while use_cuda118 not in 'YN':
             print("Invalid choice. Please try again.")
             use_cuda118 = input("Input> ").upper().strip('"\'').strip()

         if use_cuda118 == 'Y':
             print("CUDA: 11.8")
+            install_pytorch += "--index-url https://download.pytorch.org/whl/cu118"
         else:
             print("CUDA: 12.1")
-
-        install_pytorch = f"python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/{'cu121' if use_cuda118 == 'N' else 'cu118'}"
-    elif not is_macos() and choice == "B":
+            install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
+    elif not is_macos() and selected_gpu == "AMD":
         if is_linux():
-            install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6"
+            install_pytorch += "--index-url https://download.pytorch.org/whl/rocm5.6"
         else:
             print("AMD GPUs are only supported on Linux. Exiting...")
             sys.exit(1)
-    elif is_linux() and (choice == "C" or choice == "N"):
-        install_pytorch = "python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu"
-    elif choice == "D":
-        install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 intel_extension_for_pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+    elif is_linux() and selected_gpu in ["APPLE", "NONE"]:
+        install_pytorch += "--index-url https://download.pytorch.org/whl/cpu"
+    elif selected_gpu == "INTEL":
+        install_pytorch += "intel_extension_for_pytorch==2.1.* --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"

     # Install Git and then Pytorch
     print_big_message("Installing PyTorch.")
     run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)

     # Install CUDA libraries (this wasn't necessary for Pytorch before...)
-    if choice == "A":
+    if selected_gpu == "NVIDIA":
         print_big_message("Installing the CUDA runtime libraries.")
         run_cmd(f"conda install -y -c \"nvidia/label/{'cuda-12.1.1' if use_cuda118 == 'N' else 'cuda-11.8.0'}\" cuda-runtime", assert_success=True, environment=True)
@@ -283,25 +298,15 @@ def update_requirements(initial_installation=False):
     is_cpu = '+cpu' in torver  # 2.0.1+cpu

     if is_rocm:
-        if cpu_has_avx2():
-            requirements_file = "requirements_amd.txt"
-        else:
-            requirements_file = "requirements_amd_noavx2.txt"
-    elif is_cpu:
-        if cpu_has_avx2():
-            requirements_file = "requirements_cpu_only.txt"
-        else:
-            requirements_file = "requirements_cpu_only_noavx2.txt"
+        base_requirements = "requirements_amd" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
+    elif is_cpu or is_intel:
+        base_requirements = "requirements_cpu_only" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
     elif is_macos():
-        if is_x86_64():
-            requirements_file = "requirements_apple_intel.txt"
-        else:
-            requirements_file = "requirements_apple_silicon.txt"
+        base_requirements = "requirements_apple_" + ("intel" if is_x86_64() else "silicon") + ".txt"
     else:
-        if cpu_has_avx2():
-            requirements_file = "requirements.txt"
-        else:
-            requirements_file = "requirements_noavx2.txt"
+        base_requirements = "requirements" + ("_noavx2" if not cpu_has_avx2() else "") + ".txt"
+
+    requirements_file = base_requirements

     print_big_message(f"Installing webui requirements from file: {requirements_file}")
     print(f"TORCH: {torver}\n")
@@ -346,10 +351,6 @@ def update_requirements(initial_installation=False):
     clear_cache()


-def download_model():
-    run_cmd("python download-model.py", environment=True)
-
-
 def launch_webui():
     run_cmd(f"python server.py {flags}", environment=True)

setup.cfg (new file)

@@ -0,0 +1,3 @@
+[pycodestyle]
+max-line-length = 120
+ignore = E402, E501, E722
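pycodestyle reads the [pycodestyle] section of setup.cfg automatically when run from the repository root. The ignored codes are E402 (module-level import not at top of file), E501 (line too long), and E722 (bare except). A hedged invocation sketch, assuming pycodestyle is installed in the environment:

    import subprocess

    # Run the style check over two source directories; setup.cfg supplies the config.
    subprocess.run(["python", "-m", "pycodestyle", "modules/", "extensions/"], check=False)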