mirror of https://github.com/ggerganov/llama.cpp.git
parent 8841ce3f43
commit 8125e6cbfc
@@ -1880,6 +1880,7 @@ struct server_context {
                 if (slot.state == SLOT_STATE_STARTED) {
                     slot.t_start_process_prompt = ggml_time_us();
                     slot.t_start_generation = 0;
+
                     slot.n_past = 0;
                     slot.n_prompt_tokens = prompt_tokens.size();
                     slot.state = SLOT_STATE_PROCESSING_PROMPT;
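The first hunk only adjusts whitespace around the point where a server slot leaves SLOT_STATE_STARTED and begins prompt processing. For orientation, a reduced sketch of that transition; the enum and struct here are hypothetical simplifications, the real server_context slot carries many more fields:

#include <cstdint>
#include <cstdio>

enum slot_state {
    SLOT_STATE_IDLE,
    SLOT_STATE_STARTED,
    SLOT_STATE_PROCESSING_PROMPT,
    SLOT_STATE_GENERATING,
};

struct server_slot {
    slot_state state = SLOT_STATE_IDLE;

    int64_t t_start_process_prompt = 0; // set when prompt processing begins
    int64_t t_start_generation     = 0; // set when the first token is generated

    int32_t n_past          = 0; // tokens of this request already in the KV cache
    int32_t n_prompt_tokens = 0; // length of the current prompt
};

// Mirrors the transition shown in the hunk: stamp the start time, reset the
// per-request counters, and move the slot to prompt processing.
static void start_prompt_processing(server_slot & slot, int32_t n_prompt, int64_t t_now_us) {
    if (slot.state == SLOT_STATE_STARTED) {
        slot.t_start_process_prompt = t_now_us;
        slot.t_start_generation = 0;

        slot.n_past = 0;
        slot.n_prompt_tokens = n_prompt;
        slot.state = SLOT_STATE_PROCESSING_PROMPT;
    }
}

int main() {
    server_slot slot;
    slot.state = SLOT_STATE_STARTED;
    start_prompt_processing(slot, /*n_prompt=*/128, /*t_now_us=*/0);
    std::printf("state = %d, n_prompt_tokens = %d\n", slot.state, slot.n_prompt_tokens);
    return 0;
}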
@@ -266,8 +266,10 @@ static llama_tokens format_infill(
     }

     // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
-    const int n_suffix_take = std::min<int>(tokens_suffix.size(), (n_batch/4));
-    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4) - 3);
+    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
+    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));

+    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));
+
     // fill the rest of the context with extra chunks
     const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());
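The substantive change is in this second hunk. Before, the suffix always claimed a flat quarter of the batch while only the prefix budget subtracted 3 tokens, so with a non-empty user prompt the assembled FIM input (prefix + suffix + special tokens + prompt) could exceed n_batch. The new formula charges the prompt, plus what is presumably a 2-token reservation for FIM special tokens, against the suffix quarter, so that n_prefix_take + n_suffix_take + 2 + tokens_prompt.size() <= n_batch. A standalone sketch of the arithmetic; the fim_budget helper and the sample numbers are illustrative, not from the patch:

#include <algorithm>
#include <cstdio>

// Standalone sketch of the batch budget in format_infill after this commit.
static void fim_budget(int n_batch, int n_prefix, int n_suffix, int n_prompt) {
    // 3/4 of the batch goes to the prefix ...
    const int n_prefix_take = std::min(n_prefix, 3*(n_batch/4));
    // ... and the remaining 1/4 must also hold the prompt tokens plus
    // (presumably) 2 FIM special tokens, so the suffix gets what is left.
    const int n_suffix_take = std::min(n_suffix, std::max(0, (n_batch/4) - (2 + n_prompt)));

    const int total = n_prefix_take + n_suffix_take + 2 + n_prompt;
    std::printf("n_prefix_take = %d, n_suffix_take = %d, total = %d (n_batch = %d)\n",
                n_prefix_take, n_suffix_take, total, n_batch);
}

int main() {
    // With n_batch = 2048 and a 100-token prompt: prefix <= 1536,
    // suffix <= 512 - 102 = 410, total = 1536 + 410 + 2 + 100 = 2048.
    fim_budget(2048, /*n_prefix=*/4000, /*n_suffix=*/4000, /*n_prompt=*/100);

    // Under the old formula the same inputs would overflow the batch:
    // 1533 (prefix) + 512 (suffix) + 3 (specials) + 100 (prompt) = 2148 > 2048.
    return 0;
}

The unchanged n_extra_take line below the fix then spends whatever context remains after one batch and twice the generation budget (n_ctx - n_batch - 2*n_predict) on extra context chunks.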