Allow downloading single file from UI (#3737)

missionfloyd 2023-08-29 20:32:36 -06:00 committed by GitHub
parent f63dd83631
commit 787219267c
3 changed files with 33 additions and 24 deletions


@@ -164,8 +164,6 @@ text-generation-webui
 │   ├── llama-13b.ggmlv3.q4_K_M.bin
 ```
 
-Those models must be downloaded manually, as they are not currently supported by the automated downloader.
-
 #### GPT-4chan
 
 <details>


@@ -73,7 +73,7 @@ class ModelDownloader:
         for i in range(len(dict)):
             fname = dict[i]['path']
 
-            if specific_file is not None and fname != specific_file:
+            if specific_file not in [None, ''] and fname != specific_file:
                 continue
 
             if not is_lora and fname.endswith(('adapter_config.json', 'adapter_model.bin')):
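The single-file textbox in the UI submits an empty string rather than None when left blank, which is why the filter now treats both values as "download everything". A minimal sketch of that behaviour (the file listing below is made up):

```python
def keep(fname, specific_file):
    # An empty or missing specific_file means "no filter": every file is kept.
    if specific_file in (None, ''):
        return True
    # Otherwise only the exact file the user asked for survives.
    return fname == specific_file


files = ['config.json', 'tokenizer.model', 'llama-13b.ggmlv3.q4_K_M.bin']  # made-up listing
print([f for f in files if keep(f, '')])                             # all three files
print([f for f in files if keep(f, 'llama-13b.ggmlv3.q4_K_M.bin')])  # only the GGML file
```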
@@ -175,26 +175,28 @@ class ModelDownloader:
                     f.write(data)
                     if total_size != 0 and self.progress_bar is not None:
                         count += len(data)
-                        self.progress_bar(float(count) / float(total_size), f"Downloading {filename}")
+                        self.progress_bar(float(count) / float(total_size), f"{filename}")
 
     def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=1):
         thread_map(lambda url: self.get_single_file(url, output_folder, start_from_scratch=start_from_scratch), file_list, max_workers=threads, disable=True)
 
-    def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None):
+    def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):
         self.progress_bar = progress_bar
 
-        # Creating the folder and writing the metadata
+        # Create the folder and writing the metadata
         output_folder.mkdir(parents=True, exist_ok=True)
-        metadata = f'url: https://huggingface.co/{model}\n' \
-                   f'branch: {branch}\n' \
-                   f'download date: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
-        sha256_str = '\n'.join([f'    {item[1]} {item[0]}' for item in sha256])
-        if sha256_str:
-            metadata += f'sha256sum:\n{sha256_str}'
 
-        metadata += '\n'
-        (output_folder / 'huggingface-metadata.txt').write_text(metadata)
+        if not is_llamacpp:
+            metadata = f'url: https://huggingface.co/{model}\n' \
+                       f'branch: {branch}\n' \
+                       f'download date: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
+
+            sha256_str = '\n'.join([f'    {item[1]} {item[0]}' for item in sha256])
+            if sha256_str:
+                metadata += f'sha256sum:\n{sha256_str}'
+
+            metadata += '\n'
+            (output_folder / 'huggingface-metadata.txt').write_text(metadata)
 
         if specific_file:
             print(f"Downloading {specific_file} to {output_folder}")
@@ -270,4 +272,4 @@ if __name__ == '__main__':
         downloader.check_model_files(model, branch, links, sha256, output_folder)
     else:
         # Download files
-        downloader.download_model_files(model, branch, links, sha256, output_folder, specific_file=specific_file, threads=args.threads)
+        downloader.download_model_files(model, branch, links, sha256, output_folder, specific_file=specific_file, threads=args.threads, is_llamacpp=is_llamacpp)
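The same single-file path can also be driven programmatically. A rough sketch that mirrors the calls visible in this diff, with placeholder repo and file names (the __main__ block above already forwards specific_file when the script is run from the command line):

```python
import importlib

# download-model.py has a dash in its name, so import it the same way ui_model_menu does.
downloader_module = importlib.import_module("download-model")
downloader = downloader_module.ModelDownloader()

repo = "TheBloke/Llama-2-13B-GGML"        # placeholder repo
wanted = "llama-2-13b.ggmlv3.q4_K_M.bin"  # placeholder file name

model, branch = downloader.sanitize_model_and_branch_names(repo, "main")

# With specific_file set, only the matching entry survives the filter shown earlier;
# is_llamacpp tells the caller this is a GGML/GGUF-style repo.
links, sha256, is_lora, is_llamacpp = downloader.get_download_links_from_huggingface(
    model, branch, text_only=False, specific_file=wanted)

output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp)

# is_llamacpp=True also skips writing huggingface-metadata.txt next to the weights.
downloader.download_model_files(model, branch, links, sha256, output_folder,
                                specific_file=wanted, is_llamacpp=is_llamacpp, threads=1)
```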


@@ -3,6 +3,7 @@ import math
 import re
 import traceback
 from functools import partial
+from pathlib import Path
 
 import gradio as gr
 import psutil
@@ -119,14 +120,17 @@ def create_ui():
                     shared.gradio['gptq_for_llama_info'] = gr.Markdown('GPTQ-for-LLaMa support is currently only kept for compatibility with older GPUs. AutoGPTQ or ExLlama is preferred when compatible. GPTQ-for-LLaMa is installed by default with the webui on supported systems. Otherwise, it has to be installed manually following the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#installation-1).')
                     shared.gradio['exllama_info'] = gr.Markdown('For more information, consult the [docs](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).')
                     shared.gradio['exllama_HF_info'] = gr.Markdown('ExLlama_HF is a wrapper that lets you use ExLlama like a Transformers model, which means it can use the Transformers samplers. It\'s a bit slower than the regular ExLlama.')
-                    shared.gradio['llamacpp_HF_info'] = gr.Markdown('llamacpp_HF is a wrapper that lets you use llama.cpp like a Transformers model, which means it can use the Transformers samplers. To use it, make sure to first download oobabooga/llama-tokenizer under "Download custom model or LoRA".')
+                    shared.gradio['llamacpp_HF_info'] = gr.Markdown('llamacpp_HF is a wrapper that lets you use llama.cpp like a Transformers model, which means it can use the Transformers samplers. To use it, make sure to first download oobabooga/llama-tokenizer under "Download model or LoRA".')
 
             with gr.Column():
                 with gr.Row():
                     shared.gradio['autoload_model'] = gr.Checkbox(value=shared.settings['autoload_model'], label='Autoload the model', info='Whether to load the model as soon as it is selected in the Model dropdown.')
 
-                shared.gradio['custom_model_menu'] = gr.Textbox(label="Download custom model or LoRA", info="Enter the Hugging Face username/model path, for instance: facebook/galactica-125m. To specify a branch, add it at the end after a \":\" character like this: facebook/galactica-125m:main")
-                shared.gradio['download_model_button'] = gr.Button("Download")
+                shared.gradio['custom_model_menu'] = gr.Textbox(label="Download model or LoRA", info="Enter the Hugging Face username/model path, for instance: facebook/galactica-125m. To specify a branch, add it at the end after a \":\" character like this: facebook/galactica-125m:main. To download a single file, enter its name in the second box.")
+                shared.gradio['download_specific_file'] = gr.Textbox(placeholder="File name (for GGUF/GGML)", show_label=False, max_lines=1)
+                with gr.Row():
+                    shared.gradio['download_model_button'] = gr.Button("Download", variant='primary')
+                    shared.gradio['get_file_list'] = gr.Button("Get file list")
 
                 with gr.Row():
                     shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
@@ -170,7 +174,8 @@ def create_event_handlers():
         save_model_settings, gradio('model_menu', 'interface_state'), gradio('model_status'), show_progress=False)
 
     shared.gradio['lora_menu_apply'].click(load_lora_wrapper, gradio('lora_menu'), gradio('model_status'), show_progress=False)
-    shared.gradio['download_model_button'].click(download_model_wrapper, gradio('custom_model_menu'), gradio('model_status'), show_progress=True)
+    shared.gradio['download_model_button'].click(download_model_wrapper, gradio('custom_model_menu', 'download_specific_file'), gradio('model_status'), show_progress=True)
+    shared.gradio['get_file_list'].click(partial(download_model_wrapper, return_links=True), gradio('custom_model_menu', 'download_specific_file'), gradio('model_status'), show_progress=True)
 
     shared.gradio['autoload_model'].change(lambda x: gr.update(visible=not x), gradio('autoload_model'), gradio('load_model'))
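Both buttons bind to the same generator-style handler: Gradio streams each yielded string into the model_status Markdown, and partial(download_model_wrapper, return_links=True) is what turns the second button into a list-only dry run. A stripped-down illustration of that pattern outside the webui (the component names and the stub fetch body are illustrative, not the webui's own code):

```python
from functools import partial

import gradio as gr


def fetch(repo_id, file_name, list_only=False):
    # Each yield replaces the text shown in the output component.
    yield "Getting the download links from Hugging Face"
    if list_only:
        # The real handler yields backtick-quoted file names joined by blank lines.
        yield "`model.Q4_K_M.gguf`\n\n`config.json`"
        return
    yield "Done!"


with gr.Blocks() as demo:
    repo = gr.Textbox(label="Download model or LoRA")
    fname = gr.Textbox(placeholder="File name (for GGUF/GGML)", show_label=False, max_lines=1)
    with gr.Row():
        download = gr.Button("Download", variant='primary')
        list_files = gr.Button("Get file list")

    status = gr.Markdown()

    # Same inputs and output for both buttons; only the keyword flag differs.
    download.click(fetch, [repo, fname], status, show_progress=True)
    list_files.click(partial(fetch, list_only=True), [repo, fname], status, show_progress=True)

demo.launch()
```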
@@ -206,7 +211,7 @@ def load_lora_wrapper(selected_loras):
 
     yield ("Successfuly applied the LoRAs")
 
-def download_model_wrapper(repo_id, progress=gr.Progress()):
+def download_model_wrapper(repo_id, specific_file, progress=gr.Progress(), return_links=False):
     try:
         downloader_module = importlib.import_module("download-model")
         downloader = downloader_module.ModelDownloader()
@@ -220,11 +225,15 @@ def download_model_wrapper(repo_id, progress=gr.Progress()):
         model, branch = downloader.sanitize_model_and_branch_names(model, branch)
 
         yield ("Getting the download links from Hugging Face")
-        links, sha256, is_lora = downloader.get_download_links_from_huggingface(model, branch, text_only=False)
+        links, sha256, is_lora, is_llamacpp = downloader.get_download_links_from_huggingface(model, branch, text_only=False, specific_file=specific_file)
+        if return_links:
+            yield '\n\n'.join([f"`{Path(link).name}`" for link in links])
+            return
 
         yield ("Getting the output folder")
         base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir
-        output_folder = downloader.get_output_folder(model, branch, is_lora, base_folder=base_folder)
+        output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp, base_folder=base_folder)
 
         if check:
             progress(0.5)
@@ -232,8 +241,8 @@ def download_model_wrapper(repo_id, progress=gr.Progress()):
             downloader.check_model_files(model, branch, links, sha256, output_folder)
             progress(1.0)
         else:
-            yield (f"Downloading files to {output_folder}")
-            downloader.download_model_files(model, branch, links, sha256, output_folder, progress_bar=progress, threads=1)
+            yield (f"Downloading file{'s' if len(links) > 1 else ''} to `{output_folder}/`")
+            downloader.download_model_files(model, branch, links, sha256, output_folder, progress_bar=progress, threads=1, is_llamacpp=is_llamacpp)
 
             yield ("Done!")
     except:
         progress(1.0)