mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2025-01-11 21:10:24 +01:00
llama : remove n_threads from llama_decode_internal (#3614)
This commit removes `n_threads` from the `llama_decode_internal` function's doc comment, as that parameter does not exist anymore. It looks like this parameter was removed in commit 16bc66d9479edd5ee12ec734973554d4493c5dfa ("llama.cpp : split llama_context_params into model and context params").

Signed-off-by: Daniel Bevenius <daniel.bevenius@gmail.com>
This commit is contained in:
parent
424b6381c4
commit
2a4bcbacea
@@ -5721,7 +5721,6 @@ static struct ggml_cgraph * llama_build_graph(
 //
 // - lctx: llama context
 // - batch: batch to evaluate
-// - n_threads: number of threads to use
 //
 // return 0 on success
 // return positive int on warning
|
Loading…
x
Reference in New Issue
Block a user