main : better name for variable n_print (#4874)
parent 3ca63b4538
commit 7edefbd79c
@@ -630,12 +630,12 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.ppl_stride = std::stoi(argv[i]);
-        } else if (arg == "-stc" || arg == "--show-token-count") {
+        } else if (arg == "-ptc" || arg == "--print-token-count") {
             if (++i >= argc) {
                 invalid_param = true;
                 break;
             }
-            params.token_interval = std::stoi(argv[i]);
+            params.n_print = std::stoi(argv[i]);
         } else if (arg == "--ppl-output-type") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -950,8 +950,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("                        advanced option to override model metadata by key. may be specified multiple times.\n");
     printf("                        types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
-    printf("  -stc N --show-token-count N\n");
-    printf("                        show consumed tokens every N tokens (default: %d)\n", params.token_interval);
+    printf("  -stc N --print-token-count N\n");
+    printf("                        print token count every N tokens (default: %d)\n", params.n_print);
     printf("\n");
 #ifndef LOG_DISABLE_LOGS
     log_print_usage();
@@ -64,7 +64,7 @@ struct gpt_params {
     int32_t n_beams = 0; // if non-zero then use beam search of given width.
     int32_t grp_attn_n = 1; // group-attention factor
     int32_t grp_attn_w = 512; // group-attention width
-    int32_t token_interval = -1; // show token count every 512 tokens (-1 = disabled)
+    int32_t n_print = -1; // print token count every n tokens (-1 = disabled)
     float rope_freq_base = 0.0f; // RoPE base frequency
     float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
     float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
@@ -651,7 +651,7 @@ int main(int argc, char ** argv) {
 
             LOG("n_past = %d\n", n_past);
             // Display total tokens alongside total time
-            if (params.token_interval > 0 && n_past % params.token_interval == 0) {
+            if (params.n_print > 0 && n_past % params.n_print == 0) {
                 LOG_TEE("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
            }
        }
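
For reference, the gating pattern that the renamed n_print field drives can be shown as a minimal standalone sketch; the context size, loop bounds, and plain std::printf below are hypothetical stand-ins, not llama.cpp code:

// Sketch of the periodic token-count report gated by n_print, as in the main-loop hunk above.
// n_ctx and the token loop are illustrative placeholders.
#include <cstdio>

int main() {
    const int n_ctx   = 512; // stand-in for the model context size
    const int n_print = 64;  // report every 64 tokens; -1 disables the report

    for (int n_past = 1; n_past <= 256; ++n_past) {
        // same condition as in the diff: fire only on every n_print-th consumed token
        if (n_print > 0 && n_past % n_print == 0) {
            std::printf("Tokens consumed so far = %d / %d\n", n_past, n_ctx);
        }
    }
    return 0;
}

On the command line, the renamed option is passed as -ptc N or --print-token-count N, matching the parse branch in the first hunk.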