Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-12-24 13:28:59 +01:00)
Release 8-bit models memory

commit 759da435e3
parent f9faad4cfa
@@ -11,6 +11,7 @@ import transformers
 from html_generator import *
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import warnings
+import gc


 transformers.logging.set_verbosity_error()
@@ -151,6 +152,7 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
         model = None
         tokenizer = None
         if not args.cpu:
+            gc.collect()
             torch.cuda.empty_cache()
         model, tokenizer = load_model(model_name)
     if inference_settings != loaded_preset:
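Note (not part of the original commit): the change follows the usual PyTorch pattern for releasing GPU memory when swapping models. Dropping the Python references alone is not enough; gc.collect() makes the old model's tensors actually unreachable, and only then does torch.cuda.empty_cache() return the cached blocks to the driver. A minimal standalone sketch of that pattern is below; swap_model and new_model_name are hypothetical names for illustration, not helpers from this repository, and the loading call is a generic transformers example rather than the webui's own load_model.

import gc

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def swap_model(old_model, old_tokenizer, new_model_name):
    # Hypothetical helper, not part of text-generation-webui.
    # Drop the last references so the old weights become garbage.
    del old_model, old_tokenizer
    # Collect any cycles still holding GPU tensors alive.
    gc.collect()
    if torch.cuda.is_available():
        # Hand the now-unused cached blocks back to the CUDA driver.
        torch.cuda.empty_cache()

    # Load the replacement model.
    tokenizer = AutoTokenizer.from_pretrained(new_model_name)
    model = AutoModelForCausalLM.from_pretrained(new_model_name)
    if torch.cuda.is_available():
        model = model.cuda()
    return model, tokenizer

Calling gc.collect() before empty_cache() matters because empty_cache() can only release blocks whose tensors have already been freed; if a reference cycle still points at the old model, the allocator keeps its memory and the new (for example 8-bit) model may not fit.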