Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-29 02:49:30 +01:00

Merge branch 'oobabooga:main' into lora-and-model-dir

Commit f740ee558c

This merge pulls in three upstream changes: the README now advertises 4-bit GPTQ support for the LLaMA model, the GPTQ loader searches for `.safetensors` checkpoints in addition to `.pt` files and stops at the first match, and LoRA loading gains an Apple Silicon (MPS) path instead of assuming CUDA.
README.md:

```diff
@@ -27,7 +27,7 @@ Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.
 * [FlexGen offload](https://github.com/oobabooga/text-generation-webui/wiki/FlexGen).
 * [DeepSpeed ZeRO-3 offload](https://github.com/oobabooga/text-generation-webui/wiki/DeepSpeed).
 * Get responses via API, [with](https://github.com/oobabooga/text-generation-webui/blob/main/api-example-streaming.py) or [without](https://github.com/oobabooga/text-generation-webui/blob/main/api-example.py) streaming.
-* [LLaMA model, including 4-bit mode](https://github.com/oobabooga/text-generation-webui/wiki/LLaMA-model).
+* [LLaMA model, including 4-bit GPTQ support](https://github.com/oobabooga/text-generation-webui/wiki/LLaMA-model).
 * [RWKV model](https://github.com/oobabooga/text-generation-webui/wiki/RWKV-model).
 * [Supports LoRAs](https://github.com/oobabooga/text-generation-webui/wiki/Using-LoRAs).
 * Supports softprompts.
```
modules/GPTQ_loader.py:

```diff
@@ -37,21 +37,23 @@ def load_quantized(model_name):
 
     path_to_model = Path(f'models/{model_name}')
     if path_to_model.name.lower().startswith('llama-7b'):
-        pt_model = f'llama-7b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-7b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-13b'):
-        pt_model = f'llama-13b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-13b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-30b'):
-        pt_model = f'llama-30b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-30b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-65b'):
-        pt_model = f'llama-65b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-65b-{shared.args.gptq_bits}bit'
     else:
-        pt_model = f'{model_name}-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'{model_name}-{shared.args.gptq_bits}bit'
 
-    # Try to find the .pt both in models/ and in the subfolder
+    # Try to find the .safetensors or .pt both in models/ and in the subfolder
     pt_path = None
-    for path in [Path(p) for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
+    for path in [Path(p+ext) for ext in ['.safetensors', '.pt'] for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
         if path.exists():
+            print(f"Found {path}")
             pt_path = path
+            break
 
     if not pt_path:
         print(f"Could not find {pt_model}, exiting...")
```
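The reworked comprehension iterates extensions in the outer loop and locations in the inner loop, so `.safetensors` is preferred over `.pt` in both `models/` and the model's subfolder. A minimal standalone sketch of the resulting search order (the `llama-7b` name and 4-bit width are example inputs, and a local variable stands in for `shared.args.gptq_bits`; this is an illustration, not the repo's code):

```python
# Standalone sketch of the candidate-path order produced by the new
# comprehension in load_quantized().
from pathlib import Path

model_name = 'llama-7b'
gptq_bits = 4  # stand-in for shared.args.gptq_bits

path_to_model = Path(f'models/{model_name}')
pt_model = f'{model_name}-{gptq_bits}bit'

candidates = [Path(p + ext)
              for ext in ['.safetensors', '.pt']
              for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]

for candidate in candidates:
    print(candidate)

# Printed order (load_quantized() checks these and breaks on the first hit):
#   models/llama-7b-4bit.safetensors
#   models/llama-7b/llama-7b-4bit.safetensors
#   models/llama-7b-4bit.pt
#   models/llama-7b/llama-7b-4bit.pt
```

Together with the new `break`, the first existing file in that order wins, so a `.safetensors` checkpoint shadows a `.pt` one of the same name.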
modules/LoRA.py:

```diff
@@ -1,5 +1,7 @@
 from pathlib import Path
 
+import torch
+
 import modules.shared as shared
 from modules.models import load_model
 from modules.text_generation import clear_torch_cache
@@ -34,4 +36,8 @@ def add_lora_to_model(lora_name):
         if not shared.args.load_in_8bit and not shared.args.cpu:
             shared.model.half()
             if not hasattr(shared.model, "hf_device_map"):
-                shared.model.cuda()
+                if torch.has_mps:
+                    device = torch.device('mps')
+                    shared.model = shared.model.to(device)
+                else:
+                    shared.model = shared.model.cuda()
```
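The replacement branch checks for Apple's Metal backend before assuming CUDA. A minimal sketch of the same dispatch pattern in isolation (not the repo's code; note that `torch.has_mps`, which the commit uses, is deprecated in newer PyTorch in favor of `torch.backends.mps.is_available()`):

```python
# Minimal sketch of the device-dispatch pattern added above. torch.has_mps
# is the boolean flag the commit relies on; newer PyTorch releases prefer
# torch.backends.mps.is_available().
import torch


def pick_device() -> torch.device:
    if torch.has_mps:
        return torch.device('mps')  # Apple Silicon GPU (Metal)
    # Like the original line, this still assumes CUDA is present.
    return torch.device('cuda')


model = torch.nn.Linear(8, 8)
model = model.to(pick_device())
print(next(model.parameters()).device)
```

Note that the commit only adds the MPS path; the `else` branch keeps the old behavior and will still fail on machines with neither MPS nor CUDA, since there is no CPU fallback here.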