Add stop parameter for flexgen (#105)

This commit is contained in:
oobabooga 2023-02-22 11:23:36 -03:00
parent ea21a22940
commit 044b963987

View File

@@ -349,7 +349,11 @@ def generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top
     input_ids = encode(question, tokens)
     cuda = "" if (args.cpu or args.deepspeed or args.flexgen) else ".cuda()"
-    n = tokenizer.eos_token_id if eos_token is None else tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
+    if not args.flexgen:
+        n = tokenizer.eos_token_id if eos_token is None else tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
+    else:
+        n = tokenizer(eos_token).input_ids[0] if eos_token else None
     if stopping_string is not None:
         # The stopping_criteria code below was copied from
         # https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
@@ -384,6 +388,7 @@ def generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top
     generate_params = [
         f"do_sample={do_sample}",
         f"temperature={temperature}",
+        f"stop={n}",
     ]
     if args.deepspeed: