Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 06:10:29 +01:00)
llama.swiftui: fix end of generation bug (#8268)
* fix continuing to generate blank lines after getting an EOT or EOS token from the LLM
* change the variable name to is_done (name suggested by ggerganov)
* minor : fix trailing whitespace
* minor : add space

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Commit 69b9945b44 (parent c3776cacab)
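In the old code, the UI loop in LlamaState ran until the token count reached the cap (n_cur < n_len), so after the model emitted an end-of-text (EOT) or end-of-stream (EOS) token the loop kept calling completion_loop() and appending blank output until the cap was hit. The fix makes LlamaContext record end of generation in a new is_done flag, which the UI loop now checks instead of the token count; the diff below shows all three pieces.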
@@ -26,11 +26,12 @@ actor LlamaContext {
     private var context: OpaquePointer
     private var batch: llama_batch
     private var tokens_list: [llama_token]
+    var is_done: Bool = false
 
     /// This variable is used to store temporarily invalid cchars
    private var temporary_invalid_cchars: [CChar]
 
-    var n_len: Int32 = 64
+    var n_len: Int32 = 1024
     var n_cur: Int32 = 0
 
     var n_decode: Int32 = 0
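Note that the default n_len cap also grows from 64 to 1024 tokens here, presumably because the loop no longer relies on it as the normal stopping condition and it now serves only as a safety limit.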
@@ -160,6 +161,7 @@ actor LlamaContext {
 
         if llama_token_is_eog(model, new_token_id) || n_cur == n_len {
             print("\n")
+            is_done = true
             let new_token_str = String(cString: temporary_invalid_cchars + [0])
             temporary_invalid_cchars.removeAll()
             return new_token_str
@@ -132,7 +132,7 @@ class LlamaState: ObservableObject {
         messageLog += "\(text)"
 
         Task.detached {
-            while await llamaContext.n_cur < llamaContext.n_len {
+            while await !llamaContext.is_done {
                let result = await llamaContext.completion_loop()
                await MainActor.run {
                    self.messageLog += "\(result)"
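Below is a minimal, self-contained sketch of the pattern this commit applies, runnable as a single Swift script (Swift 5.7+ top-level concurrency). MockGenerator, its canned token list, and the "<eos>" sentinel are hypothetical stand-ins for LlamaContext, the real sampler, and llama_token_is_eog; only the is_done flag and the polling loop mirror the commit.

// Sketch of the termination pattern from this commit: the generator actor
// owns an is_done flag, and the consumer loop polls that flag instead of
// comparing token counts. Not the project's actual code.
actor MockGenerator {
    var is_done: Bool = false

    // Hypothetical canned output standing in for real token sampling.
    private let tokens = ["Hello", ", ", "world", "<eos>"]
    private var n_cur = 0
    private let n_len = 64  // hard length cap, mirroring n_len in the diff

    func completion_loop() -> String {
        let token = tokens[min(n_cur, tokens.count - 1)]
        n_cur += 1
        // End of generation: an end-of-stream token or the length cap.
        if token == "<eos>" || n_cur == n_len {
            is_done = true  // the consumer sees this and stops asking
            return ""
        }
        return token
    }
}

let generator = MockGenerator()
let task = Task.detached {
    var log = ""
    // The fixed loop: stop as soon as the generator reports completion,
    // instead of spinning until n_cur reaches n_len.
    while await !generator.is_done {
        log += await generator.completion_loop()
    }
    return log
}
print(await task.value)  // prints "Hello, world"

As in the patched LlamaState, the consumer never inspects token counts; the only shared stopping signal is the actor-isolated is_done flag, which keeps the termination decision next to the code that actually detects end of generation.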