From 4ed98bf1ab5766a844df8bca92879ee793b7cf50 Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Sun, 30 Jul 2023 15:01:47 +0200
Subject: [PATCH] Update convert-llama-h5-to-gguf.py

---
 convert-llama-h5-to-gguf.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/convert-llama-h5-to-gguf.py b/convert-llama-h5-to-gguf.py
index 0ea5e21aa..926dd265b 100644
--- a/convert-llama-h5-to-gguf.py
+++ b/convert-llama-h5-to-gguf.py
@@ -87,6 +87,7 @@ gguf_writer.write_feed_forward_length(llm_arch, hparams["intermediate_size"])
 gguf_writer.write_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
 gguf_writer.write_head_count(llm_arch, hparams["num_attention_heads"])
 gguf_writer.write_float32(llm_arch + ".attention.layer_norm_rms_epsilon", hparams["rms_norm_eps"])
+gguf_writer.write_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
 
 
 # TOKENIZATION
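
Note (not part of the patch): judging from the explicit call it sits next to in the hunk, the new write_layer_norm_rms_eps() helper presumably derives the key "<arch>.attention.layer_norm_rms_epsilon" and stores the value as a float32. A minimal sketch under that assumption, using a hypothetical GGUFWriterSketch class rather than the real gguf_writer API:

    import struct

    class GGUFWriterSketch:
        # Hypothetical stand-in for the real gguf_writer; illustration only.
        def __init__(self, fout):
            self.fout = fout

        def write_float32(self, key: str, value: float) -> None:
            # Illustrative key/value serialization; the actual GGUF
            # key-value encoding is more involved than this.
            self.fout.write(key.encode("utf-8") + b"\x00" + struct.pack("<f", value))

        def write_layer_norm_rms_eps(self, llm_arch: str, value: float) -> None:
            # Mirrors the explicit call shown in the hunk context:
            #   gguf_writer.write_float32(llm_arch + ".attention.layer_norm_rms_epsilon", ...)
            self.write_float32(llm_arch + ".attention.layer_norm_rms_epsilon", value)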