Mirror of https://github.com/oobabooga/text-generation-webui.git
add mps support on apple silicon

commit 30939e2aee
parent 7d97da1dcb
modules/models.py
@@ -46,6 +46,13 @@ def load_model(model_name):
     if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
+        elif torch.has_mps:
+            model = AutoModelForCausalLM.from_pretrained(
+                Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True,
+                torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
+            )
+            device = torch.device('mps')
+            model = model.to(device)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
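For readers adapting this outside the webui: `torch.has_mps` has since been deprecated in favor of `torch.backends.mps.is_available()`. A minimal standalone sketch of the same device-selection logic follows; the model directory name is a placeholder, and plain variables stand in for the `shared.args` flags:

from pathlib import Path

import torch
from transformers import AutoModelForCausalLM

model_name = "my-model"  # placeholder: any checkpoint under models/
bf16 = False             # stands in for shared.args.bf16

# Prefer CUDA, then Apple's Metal backend (MPS), then CPU.
if torch.cuda.is_available():
    device = torch.device('cuda')
elif torch.backends.mps.is_available():  # modern replacement for torch.has_mps
    device = torch.device('mps')
else:
    device = torch.device('cpu')

model = AutoModelForCausalLM.from_pretrained(
    Path(f"models/{model_name}"),
    low_cpu_mem_usage=True,
    torch_dtype=torch.bfloat16 if bf16 else torch.float16,
)
model = model.to(device)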
@@ -97,7 +104,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
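The widened guard above is what stops Apple Silicon machines from being forced into CPU mode. The same check, written against the current PyTorch API as a standalone helper (a sketch, not the project's code):

import torch

def no_accelerator_available() -> bool:
    # True only when neither CUDA nor MPS can be used,
    # mirroring the condition that triggers the CPU fallback above.
    return not torch.cuda.is_available() and not torch.backends.mps.is_available()

if no_accelerator_available():
    print("Warning: no GPU detected, falling back to CPU mode.")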
modules/text_generation.py
@@ -33,9 +33,13 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()


 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
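The `encode` change mirrors the loader: input ids must live on the same device as the model. A self-contained sketch of that pattern, using the public `gpt2` tokenizer purely as an example:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # example tokenizer only

def encode(prompt: str, device: torch.device) -> torch.Tensor:
    # Tokenize, then move the ids to the target device, as the
    # cpu / deepspeed / mps / cuda branches above each do.
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    return input_ids.to(device)

device = torch.device('mps') if torch.backends.mps.is_available() else torch.device('cpu')
ids = encode("Hello from Apple Silicon", device)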