Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-23 00:18:20 +01:00)

Improve some warnings

parent 057b1b2978
commit 9c77ab4fc2
@@ -147,7 +147,7 @@ def load_model(model_name):
 
     # Monkey patch
     if shared.args.monkey_patch:
-        logging.warning("Warning: applying the monkey patch for using LoRAs in 4-bit mode. It may cause undefined behavior outside its intended scope.")
+        logging.warning("Applying the monkey patch for using LoRAs in 4-bit mode. It may cause undefined behavior outside its intended scope.")
         from modules.monkey_patch_gptq_lora import load_model_llama
 
         model, _ = load_model_llama(model_name)
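This hunk drops a redundant "Warning:" prefix: logging.warning() already records the message at WARNING level, so the prefix duplicated information the formatter emits anyway. A minimal standalone sketch (not from this repo) showing the duplication:

    import logging

    # Typical format string; the level name is produced by the formatter itself.
    logging.basicConfig(format="%(levelname)s: %(message)s")

    # Before this commit the log line read "WARNING: Warning: applying ...";
    # after it, simply "WARNING: Applying ...".
    logging.warning("Applying the monkey patch for using LoRAs in 4-bit mode.")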
@@ -162,7 +162,7 @@ def load_model(model_name):
     else:
         params = {"low_cpu_mem_usage": True}
         if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
-            logging.warning("Warning: torch.cuda.is_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.")
+            logging.warning("torch.cuda.is_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.")
             shared.args.cpu = True
 
         if shared.args.cpu:
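The second hunk sits inside the GPU-detection fallback of load_model(). A self-contained sketch of that check, with a hypothetical _Args class standing in for shared.args (which this diff does not define):

    import logging
    import torch

    class _Args:  # hypothetical stand-in for shared.args
        cpu = False

    args = _Args()

    # Fall back to CPU only when the user has not forced CPU mode and neither
    # a CUDA device nor Apple's MPS backend is available. torch.has_mps is
    # what the original code queries; newer PyTorch versions also expose
    # torch.backends.mps.is_available().
    if not any((args.cpu, torch.cuda.is_available(), torch.has_mps)):
        logging.warning("torch.cuda.is_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.")
        args.cpu = True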