From 7d97da1dcb21962a80fed4abe3d9dd3dc0171bda Mon Sep 17 00:00:00 2001
From: Wojtek Kowaluk
Date: Sat, 18 Mar 2023 00:17:05 +0100
Subject: [PATCH 1/4] add venv paths to gitignore

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitignore b/.gitignore
index 1b7f0fb8..e2017e49 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,3 +19,6 @@ img_me*
 !models/place-your-models-here.txt
 !softprompts/place-your-softprompts-here.txt
 !torch-dumps/place-your-pt-models-here.txt
+
+venv/
+.venv/

From 30939e2aee539bd3a46573aa1ed86168b31fddf4 Mon Sep 17 00:00:00 2001
From: Wojtek Kowaluk
Date: Sat, 18 Mar 2023 00:56:23 +0100
Subject: [PATCH 2/4] add mps support on apple silicon

---
 modules/models.py          | 9 ++++++++-
 modules/text_generation.py | 4 ++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 63060d43..c37b0586 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -46,6 +46,13 @@ def load_model(model_name):
     if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
+        if torch.has_mps:
+            model = AutoModelForCausalLM.from_pretrained(
+                Path(f"models/{shared.model_name}"),low_cpu_mem_usage=True,
+                torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
+            )
+            device = torch.device('mps')
+            model = model.to(device)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
 
@@ -97,7 +104,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
 
diff --git a/modules/text_generation.py b/modules/text_generation.py
index e5b4ad91..3a7bfa6e 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -33,9 +33,13 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()
 
+
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):

From 7994b580d5c9fd4691276f40f7a9b6a204ce2564 Mon Sep 17 00:00:00 2001
From: Wojtek Kowaluk
Date: Sat, 18 Mar 2023 02:27:26 +0100
Subject: [PATCH 3/4] clean up duplicated code

---
 modules/models.py | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index c37b0586..8fa7307e 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -46,15 +46,17 @@ def load_model(model_name):
     if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
-        if torch.has_mps:
-            model = AutoModelForCausalLM.from_pretrained(
-                Path(f"models/{shared.model_name}"),low_cpu_mem_usage=True,
-                torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
-            )
-            device = torch.device('mps')
-            model = model.to(device)
         else:
-            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
+            model = AutoModelForCausalLM.from_pretrained(
+                Path(f"models/{shared.model_name}"),
+                low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
+            )
+            if torch.has_mps:
+                device = torch.device('mps')
+                model = model.to(device)
+            else:
+                model = model.cuda()
+
 
     # FlexGen
     elif shared.args.flexgen:
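
Note on the torch.has_mps check used above (editorial sketch, not part of the patch series): torch.has_mps is only present in PyTorch builds that ship the MPS backend (roughly 1.12 and later on macOS). A minimal way to confirm that the MPS path will actually be taken on a given machine might look like the snippet below; it is illustrative only and assumes a recent PyTorch install.

    import torch

    # True only if this PyTorch build was compiled with MPS support
    print(torch.backends.mps.is_built())
    # True only if an MPS device can actually be used on this machine
    print(torch.backends.mps.is_available())
    # The attribute these patches test; absent or False on older builds
    print(getattr(torch, "has_mps", False))
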
From e26763a51017988f3c95feafe3458e1b8ecbb4a3 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 17 Mar 2023 22:56:46 -0300
Subject: [PATCH 4/4] Minor changes

---
 .gitignore                 | 5 ++---
 modules/models.py          | 8 ++------
 modules/text_generation.py | 1 -
 3 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/.gitignore b/.gitignore
index e2017e49..d98b81d8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/
 
 settings.json
 img_bot*
@@ -19,6 +21,3 @@ img_me*
 !models/place-your-models-here.txt
 !softprompts/place-your-softprompts-here.txt
 !torch-dumps/place-your-pt-models-here.txt
-
-venv/
-.venv/
diff --git a/modules/models.py b/modules/models.py
index 8fa7307e..f07e738b 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -47,17 +47,13 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(
-                Path(f"models/{shared.model_name}"),
-                low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
-            )
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
             if torch.has_mps:
                 device = torch.device('mps')
                 model = model.to(device)
             else:
                 model = model.cuda()
 
-
     # FlexGen
     elif shared.args.flexgen:
         # Initialize environment
@@ -106,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
 
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 3a7bfa6e..1d11de12 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -39,7 +39,6 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
     else:
         return input_ids.cuda()
 
-
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
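
After the final patch, the device handling in load_model() and encode() amounts to: use MPS when the PyTorch build exposes torch.has_mps, otherwise use CUDA, and fall back to CPU (with a warning) when neither is available or shared.args.cpu is set. The condensed sketch below shows that ordering; it is an illustration only, and the helper name pick_device does not exist in the repository.

    import torch

    def pick_device(force_cpu: bool = False) -> torch.device:
        # Mirrors the precedence the patched code converges on:
        # forced CPU first, then MPS (Apple Silicon), then CUDA, else CPU.
        if force_cpu:
            return torch.device("cpu")
        if getattr(torch, "has_mps", False):
            return torch.device("mps")
        if torch.cuda.is_available():
            return torch.device("cuda")
        return torch.device("cpu")

    # e.g. model = model.to(pick_device(shared.args.cpu))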