From d117d4dc5dadb46831036bfa4d6e5e8c86babaf1 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 7 Jan 2024 09:50:31 +0200
Subject: [PATCH] llama : print tensor meta for debugging

---
 llama.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 3bb056dba..06db40303 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2180,7 +2180,11 @@ struct llama_model_loader {
                 type_max = type;
             }
 
-            // LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
+            // TODO: make runtime configurable
+#if 0
+            struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
+            LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, ggml_get_name(meta), ggml_type_name(type), llama_format_tensor_shape(meta).c_str());
+#endif
         }
 
         switch (type_max) {
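
The TODO in the hunk above notes that this per-tensor print should eventually be runtime configurable rather than gated behind a compile-time #if 0. A minimal standalone sketch of one way to do that, assuming a hypothetical LLAMA_TRACE environment variable and a printf stand-in for LLAMA_LOG_INFO (neither is part of this patch):

#include <cstdio>
#include <cstdlib>

// Check a (hypothetical) LLAMA_TRACE environment variable once and cache the
// result, so the per-tensor debug print can be toggled without recompiling.
static bool tensor_meta_trace_enabled() {
    static const bool enabled = std::getenv("LLAMA_TRACE") != nullptr;
    return enabled;
}

int main() {
    // Stand-in for the per-tensor loop in llama_model_loader: the real code
    // would pass ggml_get_name(meta), ggml_type_name(type) and
    // llama_format_tensor_shape(meta) instead of these placeholder values.
    if (tensor_meta_trace_enabled()) {
        std::printf("- tensor %4d: %32s %-8s [ %s ]\n",
                    0, "token_embd.weight", "q4_0", "4096, 32000, 1, 1");
    }
    return 0;
}

Running the sketch with LLAMA_TRACE=1 set in the environment enables the print; leaving it unset keeps loading quiet, which is the same effect the #if 0 guard achieves at compile time.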