mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 13:58:46 +01:00
Tell users attempting to run perplexity with too few tokens to use more (#2882)
Closes #2858 Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
This commit is contained in:
parent
e37e69dcc3
commit
fa3582f509
@ -142,6 +142,14 @@ results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params)
|
|||||||
fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
|
fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
|
||||||
|
|
||||||
std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
|
std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
|
||||||
|
|
||||||
|
if (int(tokens.size()) < 2*params.n_ctx) {
|
||||||
|
fprintf(stderr, "%s: you need at least %d tokens to evaluate perplexity with a context of %d\n",__func__,2*params.n_ctx,
|
||||||
|
params.n_ctx);
|
||||||
|
fprintf(stderr, "%s: the data file you provided tokenizes to only %zu tokens\n",__func__,tokens.size());
|
||||||
|
return {std::move(tokens), 0., {}, {}};
|
||||||
|
}
|
||||||
|
|
||||||
std::vector<float> logit_history;
|
std::vector<float> logit_history;
|
||||||
std::vector<float> prob_history;
|
std::vector<float> prob_history;
|
||||||
|
|
||||||
@ -274,6 +282,13 @@ results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
|
|||||||
auto tim2 = std::chrono::high_resolution_clock::now();
|
auto tim2 = std::chrono::high_resolution_clock::now();
|
||||||
fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
|
fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
|
||||||
|
|
||||||
|
if (int(tokens.size()) < 2*params.n_ctx) {
|
||||||
|
fprintf(stderr, "%s: you need at least %d tokens to evaluate perplexity with a context of %d\n",__func__,2*params.n_ctx,
|
||||||
|
params.n_ctx);
|
||||||
|
fprintf(stderr, "%s: the data file you provided tokenizes to only %zu tokens\n",__func__,tokens.size());
|
||||||
|
return {std::move(tokens), 0., {}, {}};
|
||||||
|
}
|
||||||
|
|
||||||
std::vector<float> logit_history;
|
std::vector<float> logit_history;
|
||||||
logit_history.resize(tokens.size());
|
logit_history.resize(tokens.size());
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user