server : fix parallel generation with very small batch sizes

This commit is contained in:
Francis Couture-Harpin 2024-08-13 22:03:57 -04:00
parent 7eda5583fa
commit c1b738ef43

View File

@ -753,13 +753,13 @@ struct server_context {
default_generation_settings_for_props = get_formated_generation(slots.front()); default_generation_settings_for_props = get_formated_generation(slots.front());
default_generation_settings_for_props["seed"] = -1; default_generation_settings_for_props["seed"] = -1;
// the update_slots() logic will always submit a maximum of n_batch tokens // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
// note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used) // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
{ {
const int32_t n_batch = llama_n_batch(ctx); const int32_t n_batch = llama_n_batch(ctx);
// only a single seq_id per token is needed // only a single seq_id per token is needed
batch = llama_batch_init(n_batch, 0, 1); batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
} }
metrics.init(); metrics.init();