Empty the CUDA cache at model.generate()

oobabooga 2023-02-25 14:39:13 -03:00
parent 1878acd9f3
commit 700311ce40


@@ -73,6 +73,8 @@ def formatted_outputs(reply, model_name):
         return reply
 
 def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=None, stopping_string=None):
+    torch.cuda.empty_cache()
+
     original_question = question
     if not (shared.args.chat or shared.args.cai_chat):
         question = apply_extensions(question, "input")
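
For readers unfamiliar with the call: torch.cuda.empty_cache() releases unused blocks held by PyTorch's CUDA caching allocator back to the driver. A minimal sketch of the effect, not part of this commit (the tensor and sizes are illustrative only):

    import torch

    # Sketch only: demonstrates what empty_cache() does; nothing here is from the commit.
    if torch.cuda.is_available():
        x = torch.empty(1024, 1024, device="cuda")  # allocate ~4 MB (float32) on the GPU
        del x  # the tensor is freed, but the caching allocator keeps the block around

        print(torch.cuda.memory_reserved())  # nonzero: memory is still cached by PyTorch
        torch.cuda.empty_cache()             # hand unused cached blocks back to the CUDA driver
        print(torch.cuda.memory_reserved())  # lower: the cache has been released

Calling this at the top of generate_reply() presumably trades a small per-call cost for releasing memory cached from the previous generation, which can reduce out-of-memory errors when other processes share the GPU. Note that it only releases unoccupied cached blocks; memory still referenced by live tensors is unaffected.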