From d6fd53afd64417203d77e1530f2f7bf182ffa96e Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Thu, 17 Aug 2023 15:24:35 +0200
Subject: [PATCH] llama.cpp : use ggml_elements()

---
 llama.cpp | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 5a1501651..b7ca6db3c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1051,11 +1051,7 @@ struct llama_model_loader {
         for (int i = 0; i < n_tensors; i++) {
             const char * name = gguf_get_tensor_name(ctx_gguf, i);
             struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
-            size_t elem = 1;
-            for (int j = 0; j < t->n_dims; j++) {
-                elem *= t->ne[j];
-            }
-            n_tot_elements += elem;
+            n_tot_elements += ggml_nelements(t);
         }
 
         // print meta data
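
For context, a minimal sketch of why the replacement is equivalent: ggml_nelements() returns a tensor's total element count (the product of its ne[] extents), and ggml keeps unused dimensions at 1, so the result matches the removed per-n_dims product loop. The toy_tensor struct and helper names below are hypothetical stand-ins for illustration, not ggml's actual definitions.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for a ggml tensor's shape: one extent per
    // dimension in ne[], with unused trailing dimensions set to 1.
    struct toy_tensor {
        int     n_dims;
        int64_t ne[4];
    };

    // Equivalent of the removed manual loop: multiply the extents of the
    // first n_dims dimensions only.
    static int64_t count_elements_manual(const toy_tensor & t) {
        int64_t elem = 1;
        for (int j = 0; j < t.n_dims; j++) {
            elem *= t.ne[j];
        }
        return elem;
    }

    // What ggml_nelements() effectively computes for such a tensor: the
    // product over all dimensions. Because unused extents are 1, this
    // agrees with the manual count above.
    static int64_t count_elements_all_dims(const toy_tensor & t) {
        return t.ne[0] * t.ne[1] * t.ne[2] * t.ne[3];
    }

    int main() {
        const toy_tensor t = { /*n_dims=*/2, { 4096, 32000, 1, 1 } };
        printf("manual: %lld, all-dims: %lld\n",
               (long long) count_elements_manual(t),
               (long long) count_elements_all_dims(t));
        return 0;
    }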