Fix LoRA on mps

This commit is contained in:
oobabooga 2023-03-25 01:18:32 -03:00 committed by GitHub
parent 3da633a497
commit 25be9698c7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -1,5 +1,7 @@
 from pathlib import Path
+import torch
+
 import modules.shared as shared
 from modules.models import load_model
 from modules.text_generation import clear_torch_cache
@@ -34,4 +36,8 @@ def add_lora_to_model(lora_name):
         if not shared.args.load_in_8bit and not shared.args.cpu:
             shared.model.half()
             if not hasattr(shared.model, "hf_device_map"):
-                shared.model.cuda()
+                if torch.has_mps:
+                    device = torch.device('mps')
+                    shared.model = shared.model.to(device)
+                else:
+                    shared.model = shared.model.cuda()