Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 14:20:31 +01:00)
ppl : fix n_seq_max for perplexity (#8277)
* ppl : fix n_seq_max for perplexity
* use 1 seq for kl_divergence

parent 916248af1f
commit 5f2d4e60e2
@@ -1991,6 +1991,12 @@ int main(int argc, char ** argv) {
         params.n_batch = std::min(params.n_batch, n_kv);
     } else {
         params.n_batch = std::min(params.n_batch, params.n_ctx);
+        if (params.kl_divergence) {
+            params.n_parallel = 1;
+        } else {
+            // ensure there's at least enough seq_ids for HellaSwag
+            params.n_parallel = std::max(4, params.n_parallel);
+        }
     }

     if (params.ppl_stride > 0) {

@@ -2015,9 +2021,6 @@ int main(int argc, char ** argv) {
     llama_model * model;
     llama_context * ctx;

-    // ensure there's at least enough seq_ids for HellaSwag
-    params.n_parallel = std::max(4, params.n_parallel);
-
     // load the model and apply lora adapter, if any
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
     if (model == NULL) {
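For context, the following is a minimal standalone sketch of the logic this commit moves and adjusts; it is not taken from llama.cpp. The struct params_t and the helper adjust_params() are hypothetical stand-ins for the relevant gpt_params fields, used only to illustrate that KL-divergence evaluation now runs with a single sequence while HellaSwag still gets at least 4 sequence ids via n_parallel (which feeds n_seq_max).

// sketch.cpp - standalone illustration, assumes a hypothetical params_t
#include <algorithm>
#include <cstdio>

struct params_t {
    int  n_batch       = 2048;
    int  n_ctx         = 512;
    int  n_parallel    = 1;
    bool kl_divergence = false;
};

static void adjust_params(params_t & params) {
    // clamp the batch size to the context size (mirrors the non-ppl branch)
    params.n_batch = std::min(params.n_batch, params.n_ctx);
    if (params.kl_divergence) {
        // a single sequence is enough for KL-divergence
        params.n_parallel = 1;
    } else {
        // ensure there's at least enough seq_ids for HellaSwag
        params.n_parallel = std::max(4, params.n_parallel);
    }
}

int main() {
    params_t kl;
    kl.kl_divergence = true;
    adjust_params(kl);
    std::printf("kl_divergence: n_parallel = %d\n", kl.n_parallel); // prints 1

    params_t hs;
    adjust_params(hs);
    std::printf("default:       n_parallel = %d\n", hs.n_parallel); // prints 4
    return 0;
}

In the sketch, as in the patch, the adjustment now happens before the model/context are created, so the value actually reaches llama_init_from_gpt_params() instead of being applied too late.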