Mirror of https://github.com/oobabooga/text-generation-webui.git
LoRA: Fix error "Attempting to unscale FP16 gradients" when training (#5268)
commit 4d14eb8b82
parent 535ea9928a
@@ -605,6 +605,11 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
                 control.should_training_stop = True
                 print(f"\033[1;31;1mStop Loss {stop_at_loss} reached.\033[0;37;0m")
 
+    # Fix training for mixed precision models
+    for param in shared.model.parameters():
+        if param.requires_grad:
+            param.data = param.data.float()
+
     trainer = transformers.Trainer(
         model=lora_model,
         train_dataset=train_data,
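Why the cast helps: with fp16 training enabled, the Trainer's GradScaler raises "ValueError: Attempting to unscale FP16 gradients." when it meets float16 gradients, and gradients take the dtype of their parameters. Upcasting only the trainable parameters to float32 avoids the error while the frozen base weights stay in half precision. Below is a minimal standalone sketch of the same cast; the toy torch.nn.Linear is a hypothetical stand-in for shared.model and is not part of the commit.

import torch

# Stand-in for a half-precision model whose trainable parameters would
# otherwise produce FP16 gradients that GradScaler refuses to unscale.
model = torch.nn.Linear(8, 8).half()

# Same loop as the commit: upcast only the trainable parameters to FP32.
for param in model.parameters():
    if param.requires_grad:
        param.data = param.data.float()

# Every trainable parameter is now float32, so its gradients will be too.
assert all(p.dtype == torch.float32 for p in model.parameters() if p.requires_grad)

Casting only parameters with requires_grad set keeps the memory cost of the fix proportional to the small LoRA adapter rather than the full base model.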