mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2025-01-03 17:51:09 +01:00
improve layer backend printing with ranges
This commit is contained in:
parent
c2beeb8e3a
commit
4e94af3060
15
llama.cpp
15
llama.cpp
@@ -2709,9 +2709,18 @@ struct llama_context * llama_new_context_with_model(
|
||||
|
||||
fprintf(stderr, "%s: layer backends: ", __func__);
|
||||
fprintf(stderr, "input: %s, ", ggml_backend_name(ctx->model.backend_input));
|
||||
for (int i = 0; i < (int)ctx->model.hparams.n_layer; i++) {
|
||||
if (i == 0 || ctx->model.backend_layers[i] != ctx->model.backend_layers[i-1]) {
|
||||
fprintf(stderr, "layer %d: %s, ", i, ggml_backend_name(ctx->model.backend_layers[i]));
|
||||
|
||||
int start = 0;
|
||||
struct ggml_backend * prev_backend = ctx->model.backend_layers[0];
|
||||
for (int i = 1; i <= (int)ctx->model.hparams.n_layer; i++) {
|
||||
if (i == (int)ctx->model.hparams.n_layer || ctx->model.backend_layers[i] != prev_backend) {
|
||||
if (start == i - 1) {
|
||||
fprintf(stderr, "layer %d: %s, ", start, ggml_backend_name(prev_backend));
|
||||
} else {
|
||||
fprintf(stderr, "layers %d-%d: %s, ", start, i - 1, ggml_backend_name(prev_backend));
|
||||
}
|
||||
start = i;
|
||||
prev_backend = ctx->model.backend_layers[i];
|
||||
}
|
||||
}
|
||||
fprintf(stderr, "output: %s, ", ggml_backend_name(ctx->model.backend_output));
|
||||
|
Loading…
Reference in New Issue
Block a user