Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-25 05:48:47 +01:00)
swiftui : enable stream updating (#7754)
commit 0e64591e82
parent b1ef562bc1
@@ -131,22 +131,29 @@ class LlamaState: ObservableObject {
 
         messageLog += "\(text)"
 
-        while await llamaContext.n_cur < llamaContext.n_len {
-            let result = await llamaContext.completion_loop()
-            messageLog += "\(result)"
-        }
+        Task.detached {
+            while await llamaContext.n_cur < llamaContext.n_len {
+                let result = await llamaContext.completion_loop()
+                await MainActor.run {
+                    self.messageLog += "\(result)"
+                }
+            }
 
-        let t_end = DispatchTime.now().uptimeNanoseconds
-        let t_generation = Double(t_end - t_heat_end) / NS_PER_S
-        let tokens_per_second = Double(await llamaContext.n_len) / t_generation
+            let t_end = DispatchTime.now().uptimeNanoseconds
+            let t_generation = Double(t_end - t_heat_end) / self.NS_PER_S
+            let tokens_per_second = Double(await llamaContext.n_len) / t_generation
 
-        await llamaContext.clear()
-        messageLog += """
-            \n
-            Done
-            Heat up took \(t_heat)s
-            Generated \(tokens_per_second) t/s\n
-            """
+            await llamaContext.clear()
+
+            await MainActor.run {
+                self.messageLog += """
+                    \n
+                    Done
+                    Heat up took \(t_heat)s
+                    Generated \(tokens_per_second) t/s\n
+                    """
+            }
+        }
     }
 
     func bench() async {
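For reference, a minimal, self-contained sketch of the streaming pattern this commit applies, assuming Swift concurrency plus Combine: the generation loop runs in a detached task off the main actor, and each token is handed back via MainActor.run so the published log updates as text arrives. StreamingState and FakeTokenSource below are hypothetical stand-ins for LlamaState and LlamaContext; only the Task.detached / MainActor.run structure mirrors the actual diff.

import Combine

// Sketch of the streaming pattern from the diff above.
// FakeTokenSource is a hypothetical stand-in for LlamaContext.
@MainActor
final class StreamingState: ObservableObject {
    @Published var messageLog = ""

    func complete(using source: FakeTokenSource) {
        // Run the generation loop off the main actor so the UI stays live.
        Task.detached {
            while await source.hasMoreTokens {
                let token = await source.nextToken()
                // UI state must be mutated on the main actor,
                // mirroring the MainActor.run hop in the commit.
                await MainActor.run {
                    self.messageLog += token
                }
            }
        }
    }
}

// Hypothetical token source; in the commit, LlamaContext.completion_loop()
// plays this role, yielding one decoded piece of text per call.
actor FakeTokenSource {
    private var remaining = ["Hello", ", ", "world", "!"]
    var hasMoreTokens: Bool { !remaining.isEmpty }
    func nextToken() -> String {
        remaining.removeFirst()
    }
}

Splitting the work this way appears to be what enables the stream updating named in the commit title: token generation stays off the main actor, while each append to the @Published property happens on it, so SwiftUI can redraw per token instead of showing the whole response only after the loop finishes.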