Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-11 21:10:24 +01:00)
server : do not speculate during prompt processing
ggml-ci
parent 642330ac7c
commit 33d7b70c88
@@ -2322,6 +2322,10 @@ struct server_context {
                 continue;
             }
 
+            if (slot.state != SLOT_STATE_GENERATING) {
+                continue;
+            }
+
             llama_token id = slot.sampled;
 
             struct common_speculative_params params_spec;
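
The change adds an early-out so the server only runs speculative decoding for slots that have already entered the generation phase. Below is a minimal, hedged sketch of that control flow, not the actual server.cpp: SLOT_STATE_GENERATING is the name taken from the diff, while the other slot_state values and the speculate()/update_slots() helpers are simplified stand-ins for illustration.

// Sketch: skip speculation for slots still processing their prompt.
// While a slot is ingesting the prompt, slot.sampled does not yet hold a
// freshly sampled token, so drafting against it would speculate from a
// stale value. The added guard skips such slots.

#include <cstdio>
#include <vector>

enum slot_state {
    SLOT_STATE_IDLE,
    SLOT_STATE_PROCESSING_PROMPT, // simplified stand-in
    SLOT_STATE_GENERATING,        // name taken from the diff above
};

struct server_slot {
    slot_state state   = SLOT_STATE_IDLE;
    int        sampled = -1; // last sampled token id; only valid while generating

    bool is_processing() const { return state != SLOT_STATE_IDLE; }
    bool can_speculate() const { return true; } // stands in for the draft-model check
};

// hypothetical stand-in for the drafting step driven by slot.sampled
static void speculate(const server_slot & slot) {
    std::printf("speculating from token %d\n", slot.sampled);
}

static void update_slots(std::vector<server_slot> & slots) {
    for (auto & slot : slots) {
        if (!slot.is_processing() || !slot.can_speculate()) {
            continue;
        }

        // the fix: do not speculate during prompt processing
        if (slot.state != SLOT_STATE_GENERATING) {
            continue;
        }

        speculate(slot);
    }
}

int main() {
    std::vector<server_slot> slots(2);
    slots[0].state = SLOT_STATE_PROCESSING_PROMPT; // skipped by the new guard
    slots[1].state = SLOT_STATE_GENERATING;
    slots[1].sampled = 42;

    update_slots(slots); // prints only for the generating slot
    return 0;
}

With the guard in place, only the generating slot reaches speculate(); a slot still decoding its prompt batch is passed over until it produces its first sampled token.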