From 4dd14dcab4778b2d4e031db9cdfa94a2e1fe13e6 Mon Sep 17 00:00:00 2001
From: Chimdumebi Nebolisa <78305519+MichealC0@users.noreply.github.com>
Date: Thu, 9 Mar 2023 10:22:09 +0100
Subject: [PATCH 1/8] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9efacb7c..23d53604 100644
--- a/README.md
+++ b/README.md
@@ -53,7 +53,7 @@ The third line assumes that you have an NVIDIA GPU.
 pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2
 ```
 
-* If you are running in CPU mode, replace the third command with this one:
+* If you are running it in CPU mode, replace the third command with this one:
 
 ```
 conda install pytorch torchvision torchaudio git -c pytorch

From ec3de0495c52a6d81495ac0553f4a7a886e4e0c8 Mon Sep 17 00:00:00 2001
From: Ber Zoidberg
Date: Thu, 9 Mar 2023 19:08:09 -0800
Subject: [PATCH 2/8] download tokenizer when present

---
 download-model.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/download-model.py b/download-model.py
index 599418fc..27fbffda 100644
--- a/download-model.py
+++ b/download-model.py
@@ -107,9 +107,10 @@ def get_download_links_from_huggingface(model, branch):
 
         is_pytorch = re.match("pytorch_model.*\.bin", fname)
         is_safetensors = re.match("model.*\.safetensors", fname)
+        is_tokenizer = re.match("tokenizer.*\.model", fname)
         is_text = re.match(".*\.(txt|json)", fname)
 
-        if is_text or is_safetensors or is_pytorch:
+        if any((is_pytorch, is_safetensors, is_text, is_tokenizer)):
             if is_text:
                 links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                 classifications.append('text')

From 249c268176114e72da3e82d7e2c652481060f44f Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 10 Mar 2023 00:41:10 -0300
Subject: [PATCH 3/8] Fix the download script for long lists of files on HF

---
 download-model.py | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/download-model.py b/download-model.py
index 599418fc..98b57bb0 100644
--- a/download-model.py
+++ b/download-model.py
@@ -5,7 +5,9 @@ Example:
 python download-model.py facebook/opt-1.3b
 
 '''
+
 import argparse
+import base64
 import json
 import multiprocessing
 import re
@@ -93,14 +95,18 @@ facebook/opt-1.3b
 def get_download_links_from_huggingface(model, branch):
     base = "https://huggingface.co"
     page = f"/api/models/{model}/tree/{branch}?cursor="
+    cursor = b""
 
     links = []
     classifications = []
     has_pytorch = False
     has_safetensors = False
-    while page is not None:
-        content = requests.get(f"{base}{page}").content
+    while True:
+        content = requests.get(f"{base}{page}{cursor.decode()}").content
+
+        dict = json.loads(content)
+        if len(dict) == 0:
+            break
 
         for i in range(len(dict)):
             fname = dict[i]['path']
@@ -123,8 +129,9 @@ def get_download_links_from_huggingface(model, branch):
                     has_pytorch = True
                     classifications.append('pytorch')
 
-        #page = dict['nextUrl']
-        page = None
+        cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
+        cursor = base64.b64encode(cursor)
+        cursor = cursor.replace(b'=', b'%3D')
 
     # If both pytorch and safetensors are available, download safetensors only
     if has_pytorch and has_safetensors:
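Patch 3 above replaces the unused nextUrl field with Hugging Face's opaque pagination cursor: a JSON fragment naming the last file of the current page is base64-encoded, suffixed with the page-size marker b':50', base64-encoded a second time, and its '=' padding percent-escaped so it survives the URL query string. A minimal standalone sketch of that construction, assuming the scheme as written in the patch (the helper name next_cursor and the sample filename are illustrative, not part of the patch):

    import base64

    def next_cursor(last_path):
        # Inner payload: JSON naming the last file seen on the current page,
        # with b':50' (the page size) appended after the first base64 pass,
        # mirroring the patch above.
        inner = base64.b64encode(f'{{"file_name":"{last_path}"}}'.encode()) + b':50'
        # The token is base64-encoded once more, and '=' padding is
        # percent-escaped for use in the query string.
        return base64.b64encode(inner).replace(b'=', b'%3D')

    # Illustrative filename, not taken from a real listing:
    print(next_cursor("pytorch_model-00001-of-00033.bin"))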
From 875847bf88c52166c4e9a0cc35f7e6c535b88d97 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 10 Mar 2023 00:45:28 -0300
Subject: [PATCH 4/8] Consider tokenizer a type of text

---
 download-model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/download-model.py b/download-model.py
index 27fbffda..bf94be7c 100644
--- a/download-model.py
+++ b/download-model.py
@@ -108,7 +108,7 @@ def get_download_links_from_huggingface(model, branch):
         is_pytorch = re.match("pytorch_model.*\.bin", fname)
         is_safetensors = re.match("model.*\.safetensors", fname)
         is_tokenizer = re.match("tokenizer.*\.model", fname)
-        is_text = re.match(".*\.(txt|json)", fname)
+        is_text = re.match(".*\.(txt|json)", fname) or is_tokenizer
 
         if any((is_pytorch, is_safetensors, is_text, is_tokenizer)):
             if is_text:

From ab470444591e425290db72db9ebc3127f5520449 Mon Sep 17 00:00:00 2001
From: deepdiffuser
Date: Fri, 10 Mar 2023 04:29:09 -0800
Subject: [PATCH 5/8] add multi-gpu support for 4bit gptq LLaMA

---
 modules/models.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 3e6cea18..14443c89 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -110,7 +110,18 @@ def load_model(model_name):
             exit()
 
         model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
-        model = model.to(torch.device('cuda:0'))
+
+        if shared.args.gpu_memory:
+            max_memory = {}
+            for i in range(len(shared.args.gpu_memory)):
+                max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
+            max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
+
+            import accelerate
+            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory)
+            model = accelerate.dispatch_model(model, device_map=device_map)
+        else:
+            model = model.to(torch.device('cuda:0'))
 
     # Custom
     else:

From 9fbd60bf22c6a2e9cef0cade23a4933547df9114 Mon Sep 17 00:00:00 2001
From: deepdiffuser
Date: Fri, 10 Mar 2023 05:30:47 -0800
Subject: [PATCH 6/8] add no_split_module_classes to prevent tensor split error

---
 modules/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 14443c89..986cd73a 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -118,7 +118,7 @@ def load_model(model_name):
             max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
 
             import accelerate
-            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory)
+            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
             model = accelerate.dispatch_model(model, device_map=device_map)
         else:
             model = model.to(torch.device('cuda:0'))

From e461c0b7a0769c4df3aa96505803b004a1071c2e Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 10 Mar 2023 10:51:12 -0300
Subject: [PATCH 7/8] Move the import to the top

---
 modules/models.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 986cd73a..f4c1071d 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -112,12 +112,13 @@ def load_model(model_name):
         model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
 
         if shared.args.gpu_memory:
+            import accelerate
+
             max_memory = {}
             for i in range(len(shared.args.gpu_memory)):
                 max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
             max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
 
-            import accelerate
             device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
             model = accelerate.dispatch_model(model, device_map=device_map)
         else:
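Taken together, patches 5-7 converge on the standard accelerate sharding pattern: cap per-device memory, let infer_auto_device_map plan the placement with whole decoder layers pinned to one device, then dispatch. A sketch of that flow under the same assumptions as the patches (the function name dispatch_across_gpus is illustrative; gpu_memory and cpu_memory mirror shared.args.gpu_memory and shared.args.cpu_memory):

    import accelerate

    def dispatch_across_gpus(model, gpu_memory, cpu_memory=None):
        # Per-device budgets in GiB, keyed by GPU index plus a 'cpu'
        # offload budget, as in patch 5.
        max_memory = {i: f"{mem}GiB" for i, mem in enumerate(gpu_memory)}
        max_memory['cpu'] = f"{cpu_memory or '99'}GiB"

        # no_split_module_classes keeps each decoder layer intact on a
        # single device, which avoids the tensor-split error patch 6 fixes.
        device_map = accelerate.infer_auto_device_map(
            model,
            max_memory=max_memory,
            no_split_module_classes=["LLaMADecoderLayer"],
        )
        # Scatter the model's modules across devices per the plan.
        return accelerate.dispatch_model(model, device_map=device_map)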
From de7dd8b6aa3aa00ba629c9ba6ce1bc32bd213d2f Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 10 Mar 2023 10:54:08 -0300
Subject: [PATCH 8/8] Add comments

---
 modules/models.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/modules/models.py b/modules/models.py
index f4c1071d..a5ec59d1 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -111,6 +111,7 @@ def load_model(model_name):
 
         model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
 
+        # Multi-GPU setup
         if shared.args.gpu_memory:
             import accelerate
 
@@ -121,6 +122,8 @@ def load_model(model_name):
 
             device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
             model = accelerate.dispatch_model(model, device_map=device_map)
+
+        # Single GPU
        else:
             model = model.to(torch.device('cuda:0'))
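For reference, after patches 2 and 4 the download script's file classification reduces to a small set of regex rules, with tokenizer models folded into the 'text' bucket so they are fetched alongside configs. A condensed sketch of the resulting logic (the classify helper is illustrative; the regexes follow the patched download-model.py):

    import re

    def classify(fname):
        # Regex rules as in the patched script.
        is_pytorch = re.match(r"pytorch_model.*\.bin", fname)
        is_safetensors = re.match(r"model.*\.safetensors", fname)
        is_tokenizer = re.match(r"tokenizer.*\.model", fname)
        # Patch 4: tokenizer files count as text, so they take the same
        # download path as .txt/.json files.
        is_text = re.match(r".*\.(txt|json)", fname) or is_tokenizer
        if is_text:
            return 'text'
        if is_safetensors:
            return 'safetensors'
        if is_pytorch:
            return 'pytorch'
        return None

    assert classify("tokenizer.model") == 'text'
    assert classify("model-00001-of-00002.safetensors") == 'safetensors'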