From 80f1faac87c267547328bc7c926f9dd4f2610226 Mon Sep 17 00:00:00 2001
From: Howard Su
Date: Sun, 21 May 2023 22:31:19 +0800
Subject: [PATCH] format fix

---
 ggml.c    | 20 ++++++++++++--------
 llama.cpp | 12 ++++++++----
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/ggml.c b/ggml.c
index 6cddb3f9e..1223afced 100644
--- a/ggml.c
+++ b/ggml.c
@@ -856,10 +856,11 @@ void quantize_upgrade(enum ggml_type type, void* data, size_t * size, bool shuff
 
             new_blk_buf.d = GGML_FP32_TO_FP16(blk[i].d);
 
-            if (shuffle)
+            if (shuffle) {
                 quantize_shuffle_block(blk[i].qs, new_blk_buf.qs, qk/4);
-            else
+            } else {
                 memcpy(new_blk_buf.qs, blk[i].qs, qk / 2);
+            }
 
             memcpy(&new_blk[i], &new_blk_buf, sizeof(block_q4_0));
         }
@@ -875,10 +876,11 @@ void quantize_upgrade(enum ggml_type type, void* data, size_t * size, bool shuff
             new_blk_buf.d = GGML_FP32_TO_FP16(blk[i].d);
             new_blk_buf.m = GGML_FP32_TO_FP16(blk[i].m);
 
-            if (shuffle)
+            if (shuffle) {
                 quantize_shuffle_block(blk[i].qs, new_blk_buf.qs, qk/4);
-            else
+            } else {
                 memcpy(new_blk_buf.qs, blk[i].qs, qk / 2);
+            }
             memcpy(&new_blk[i], &new_blk_buf, sizeof(block_q4_1));
         }
     } else if (type == GGML_TYPE_Q5_0) {
@@ -889,10 +891,11 @@ void quantize_upgrade(enum ggml_type type, void* data, size_t * size, bool shuff
         block_q5_0 new_blk;
 
         for (size_t i = 0; i < nb ; i++) {
-            if (shuffle)
+            if (shuffle) {
                 quantize_shuffle_block(blk[i].qs, new_blk.qs, qk/4);
-            else
+            } else {
                 memcpy(new_blk.qs, blk[i].qs, qk / 2);
+            }
             memcpy(blk[i].qs, new_blk.qs, sizeof(new_blk.qs));
         }
     } else if (type == GGML_TYPE_Q5_1) {
@@ -903,10 +906,11 @@ void quantize_upgrade(enum ggml_type type, void* data, size_t * size, bool shuff
         block_q5_1 new_blk;
 
         for (size_t i = 0; i < nb ; i++) {
-            if (shuffle)
+            if (shuffle) {
                 quantize_shuffle_block(blk[i].qs, new_blk.qs, qk/4);
-            else
+            } else {
                 memcpy(new_blk.qs, blk[i].qs, qk / 2);
+            }
             memcpy(&blk[i], &new_blk, sizeof(new_blk));
         }
     } else if (type == GGML_TYPE_Q8_0) {
diff --git a/llama.cpp b/llama.cpp
index bc1e02017..5f4714be5 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -327,6 +327,8 @@ static size_t llama_calc_tensor_size_prev3(const std::vector<uint32_t> & ne, enu
         case GGML_TYPE_Q8_0:
             size += 2;
             break;
+        default:
+            break;
     }
 
     for (uint32_t dim : ne) {
@@ -343,10 +345,11 @@ struct llama_load_tensor_shard {
     size_t file_off;
 
     void calc_size(llama_file_version file_version) {
-        if (file_version == LLAMA_FILE_VERSION_GGJT_V3)
+        if (file_version == LLAMA_FILE_VERSION_GGJT_V3) {
             size = llama_calc_tensor_size(ne, type);
-        else
+        } else {
             size = llama_calc_tensor_size_prev3(ne, type);
+        }
     }
 };
 
@@ -426,10 +429,11 @@ struct llama_load_tensor {
     }
 
     void calc_size(llama_file_version file_version) {
-        if (file_version == LLAMA_FILE_VERSION_GGJT_V3)
+        if (file_version == LLAMA_FILE_VERSION_GGJT_V3) {
             size = llama_calc_tensor_size(ne, type);
-        else
+        } else {
             size = llama_calc_tensor_size_prev3(ne, type);
+        }
     }
 };