ppl : fix n_seq_max for perplexity (#8277)

* ppl : fix n_seq_max for perplexity

* use 1 seq for kl_divergence

slaren, 2024-07-03 19:33:31 +02:00 (committed by GitHub)
parent 916248af1f
commit 5f2d4e60e2

@@ -1991,6 +1991,12 @@ int main(int argc, char ** argv) {
         params.n_batch = std::min(params.n_batch, n_kv);
     } else {
         params.n_batch = std::min(params.n_batch, params.n_ctx);
+        if (params.kl_divergence) {
+            params.n_parallel = 1;
+        } else {
+            // ensure there's at least enough seq_ids for HellaSwag
+            params.n_parallel = std::max(4, params.n_parallel);
+        }
     }
 
     if (params.ppl_stride > 0) {
@@ -2015,9 +2021,6 @@ int main(int argc, char ** argv) {
     llama_model * model;
     llama_context * ctx;
 
-    // ensure there's at least enough seq_ids for HellaSwag
-    params.n_parallel = std::max(4, params.n_parallel);
-
     // load the model and apply lora adapter, if any
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
     if (model == NULL) {
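
Why the move matters: params.n_parallel is, as far as I can tell, what llama_init_from_gpt_params() maps to the context's n_seq_max when the llama_context is created, so it has to hold its final value before that call. The old unconditional std::max(4, params.n_parallel) ran for every mode and could override the sequence count the plain-perplexity branch had computed; after this change the clamp applies only to the HellaSwag-style tasks, and KL-divergence gets exactly one sequence. Below is a minimal sketch of the resulting policy, assuming the usual meaning of the ppl and kl_divergence flags in this file; pick_n_parallel is a hypothetical helper for illustration, not part of the patch.

#include <algorithm>
#include <cstdint>

// Hypothetical helper summarizing the post-patch policy for choosing n_parallel
// (and therefore the context's n_seq_max) before the context is created.
static int32_t pick_n_parallel(bool ppl, bool kl_divergence,
                               int32_t n_batch, int32_t n_ctx, int32_t n_parallel) {
    if (ppl) {
        // plain perplexity packs n_seq chunks of n_ctx tokens into a single batch
        return std::max<int32_t>(1, n_batch / n_ctx);
    }
    if (kl_divergence) {
        // KL-divergence evaluates a single stream against the base logits
        return 1;
    }
    // ensure there's at least enough seq_ids for HellaSwag-style tasks
    return std::max<int32_t>(4, n_parallel);
}

In the patched file the same decision is made inline in main(), right before llama_init_from_gpt_params(params) runs, which is what fixes n_seq_max for plain perplexity and KL-divergence runs.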