Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-25 17:29:22 +01:00)
llama.cpp: minor log change & lint
commit 68059d7c23
parent 1b05832f9a
modules/llamacpp_model.py

@@ -64,6 +64,7 @@ class LlamaCppModel:
             else:
                 cache_capacity = int(shared.args.cache_capacity)
 
-        logger.info("Cache capacity is " + str(cache_capacity) + " bytes")
+        if cache_capacity > 0:
+            logger.info("Cache capacity is " + str(cache_capacity) + " bytes")
 
         if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '':
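For orientation, the context lines above belong to the loader's cache-capacity parsing: --cache_capacity accepts a raw byte count or a value with a MiB/GiB suffix, and the guard added here keeps the log quiet when no cache was requested. A minimal runnable sketch of that parsing, assuming the surrounding branches from the repo; the helper name parse_cache_capacity is hypothetical (the real module does this inline on shared.args.cache_capacity):

import re

def parse_cache_capacity(value):
    """Parse a --cache_capacity value: raw bytes, or a number with a MiB/GiB suffix.

    Hypothetical helper standing in for the inline parsing in the module.
    """
    if value is None:
        return 0
    if 'GiB' in value:
        return int(re.sub('[a-zA-Z]', '', value)) * 1000 * 1000 * 1000
    if 'MiB' in value:
        return int(re.sub('[a-zA-Z]', '', value)) * 1000 * 1000
    return int(value)

cache_capacity = parse_cache_capacity('8GiB')
# After this commit, the message is only emitted for a non-zero capacity.
if cache_capacity > 0:
    print("Cache capacity is " + str(cache_capacity) + " bytes")  # logger.info in the module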
@@ -118,9 +119,7 @@ class LlamaCppModel:
         self.grammar = None
 
     def generate(self, prompt, state, callback=None):
-
         LogitsProcessorList = llama_cpp_lib().LogitsProcessorList
-
         prompt = prompt if type(prompt) is str else prompt.decode()
 
         # Handle truncation
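A note on the context lines above: generate() fetches LogitsProcessorList from llama_cpp_lib() at call time because the webui selects between builds of llama-cpp-python at runtime rather than importing one at module load. As a hedged sketch of what such a processor list does in llama-cpp-python (the model path and the ban-EOS processor are illustrative, not part of this commit):

from llama_cpp import Llama, LogitsProcessorList

def ban_token(token_id):
    # A logits processor is a callable (input_ids, scores) -> scores.
    def processor(input_ids, scores):
        scores[token_id] = -float('inf')  # make this token unselectable
        return scores
    return processor

llm = Llama(model_path='model.gguf')  # placeholder path
processors = LogitsProcessorList([ban_token(llm.token_eos())])

# Streamed completion with the processors applied at every sampling step.
for chunk in llm.create_completion('Hello', stream=True, logits_processor=processors):
    print(chunk['choices'][0]['text'], end='')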
@@ -163,6 +162,7 @@ class LlamaCppModel:
         for completion_chunk in completion_chunks:
             if shared.stop_everything:
                 break
+
             text = completion_chunk['choices'][0]['text']
             output += text
             if callback:
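This hunk appears to add only a blank line, but the loop it touches is the heart of generate(): pull chunks off the streamed completion iterator, bail out when the global stop flag is set, and feed each text fragment to the UI callback. A self-contained sketch, with stream_completion and stop_flag as assumed stand-ins for the method body and shared.stop_everything:

def stream_completion(completion_chunks, callback=None, stop_flag=lambda: False):
    """Hypothetical distillation of the loop in the hunk above."""
    output = ''
    for completion_chunk in completion_chunks:
        if stop_flag():  # shared.stop_everything in the real module
            break

        text = completion_chunk['choices'][0]['text']
        output += text
        if callback:
            callback(text)  # e.g. push the fragment to the UI

    return output

# Example with a fake iterator in place of llama-cpp-python's stream:
chunks = [{'choices': [{'text': t}]} for t in ('Hello', ', ', 'world')]
print(stream_completion(chunks, callback=lambda t: None))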