diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index af82cd6cd..42dace219 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2504,11 +2504,6 @@ class Gemma2Model(Model):
         )
         self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
 
-        # sanity check
-        attn_scalar = self.hparams["query_pre_attn_scalar"]
-        if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
-            raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
-
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
 
diff --git a/src/llama.cpp b/src/llama.cpp
index 77d34dca2..400a4232b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11680,7 +11680,12 @@ struct llm_build_context {
                         ext_factor, attn_factor, beta_fast, beta_slow);
                 cb(Qcur, "Qcur", il);
 
-                Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head)));
+                // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
+                switch (model.type) {
+                    case e_model::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
+                    case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+                    default: GGML_ASSERT(false);
+                };
 
                 cb(Qcur, "Qcur_scaled", il);
 
                 Kcur = ggml_rope_ext(
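
Why the old sanity check had to go and why llama.cpp now switches on model type: per the referenced gemma_pytorch commit, the two Gemma 2 variants define query_pre_attn_scalar differently, so no single formula covers both. Below is a minimal Python sketch of the per-model scale that the new switch computes; the helper name gemma2_q_scale and the concrete hparams values are assumptions on my part (quoted from the published Gemma 2 configs as I recall them, so verify against the actual config.json files before relying on them).

import math

# Hypothetical helper, not part of the codebase, mirroring the switch in llm_build_context:
# the 9B scales Q by 1/sqrt(head_dim) (n_embd_head_k in llama.cpp terms),
# the 27B by 1/sqrt(hidden_size / num_heads) (n_embd / n_head).
def gemma2_q_scale(model_type: str, hidden_size: int, num_heads: int, head_dim: int) -> float:
    if model_type == "9B":
        return 1.0 / math.sqrt(head_dim)
    if model_type == "27B":
        return 1.0 / math.sqrt(hidden_size / num_heads)
    raise ValueError(f"unexpected Gemma 2 variant: {model_type}")

# Assumed hparams taken from the public Gemma 2 configs (double-check them):
print(gemma2_q_scale("9B",  hidden_size=3584, num_heads=16, head_dim=256))   # 1/sqrt(256) = 0.0625
print(gemma2_q_scale("27B", hidden_size=4608, num_heads=32, head_dim=128))   # 1/sqrt(144) ~ 0.0833, not 1/sqrt(128)

Under those assumed configs the 9B has head_dim 256 while hidden_size / num_attention_heads is 224, so the removed check in convert_hf_to_gguf.py cannot hold for both variants at once, and the previous single 1.0f / sqrtf(float(n_embd / n_head)) scale in llama.cpp matched only the 27B.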