From e91a2224e49aeed6e1382643dcb053ec6c168440 Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Sun, 13 Aug 2023 00:02:44 +0200
Subject: [PATCH] convert-llama-h5-to-gguf.py : n_layer --> n_block

---
 convert-llama-h5-to-gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/convert-llama-h5-to-gguf.py b/convert-llama-h5-to-gguf.py
index bf6ff6aa7..055b6b78d 100644
--- a/convert-llama-h5-to-gguf.py
+++ b/convert-llama-h5-to-gguf.py
@@ -76,7 +76,7 @@ gguf_writer.add_name(last_dir)
 gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_layer_count(llm_arch, block_count)
+gguf_writer.add_block_count(llm_arch, block_count)
 gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
 gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
 gguf_writer.add_head_count(llm_arch, head_count)