Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-26 01:30:20 +01:00
Style changes

commit e3810dff40
parent bfafd07f44
@@ -1,19 +1,17 @@
 import json
 import math
 import random
+import shutil
 import sys
 import threading
 import time
 import traceback
+from datetime import datetime
 from pathlib import Path
 
 import gradio as gr
 import torch
 import transformers
-
-import shutil
-from datetime import datetime
-
 from datasets import Dataset, load_dataset
 from peft import (
     LoraConfig,
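The reordering above is what a standard-library-first import sort produces: plain `import` statements alphabetized ahead of `from ... import` lines within each block, and all third-party imports merged into a single second block. Assuming the change came from isort rather than a manual edit (the commit message says only "Style changes", so the tool is a guess), the same result can be reproduced with isort's Python API:

# Hypothetical reproduction of this commit's import sort; isort is assumed,
# not confirmed by the commit. isort.code() returns the sorted source string.
import isort

messy = "import sys\nimport json\nimport shutil\nfrom datetime import datetime\n"
print(isort.code(messy))
# import json
# import shutil
# import sys
# from datetime import datetime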
@@ -240,6 +238,7 @@ def backup_adapter(input_folder):
     except Exception as e:
         print("An error occurred in backup_adapter:", str(e))
 
+
 def calc_trainable_parameters(model):
     trainable_params = 0
     all_param = 0
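Only the first two lines of calc_trainable_parameters appear in the hunk above. For context, the function follows the parameter-counting pattern popularized by PEFT's print_trainable_parameters: walk named_parameters(), total numel() for every tensor, and separately for tensors with requires_grad set. A sketch of that pattern, assuming a torch.nn.Module, not necessarily the file's exact body:

import torch

def calc_trainable_parameters(model: torch.nn.Module):
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        num_params = param.numel()
        # Under DeepSpeed ZeRO-3, partitioned weights report numel() == 0;
        # the true count lives in the ds_numel attribute.
        if num_params == 0 and hasattr(param, "ds_numel"):
            num_params = param.ds_numel
        all_param += num_params
        if param.requires_grad:
            trainable_params += num_params
    return trainable_params, all_param

# A Linear(4, 2) layer has 4*2 weights + 2 biases = 10 params, all trainable:
print(calc_trainable_parameters(torch.nn.Linear(4, 2)))  # (10, 10)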
@@ -563,7 +562,6 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
     if lora_all_param > 0:
         print(f"Trainable params: {lora_trainable_param:,d} ({100 * lora_trainable_param / lora_all_param:.4f} %), All params: {lora_all_param:,d} (Model: {model_all_params:,d})")
 
-
     train_log.update({"base_model_name": shared.model_name})
     train_log.update({"base_model_class": shared.model.__class__.__name__})
     train_log.update({"base_loaded_in_4bit": getattr(lora_model, "is_loaded_in_4bit", False)})