mirror of https://github.com/ggerganov/llama.cpp.git
llama : fix Gemma-2 Query scaling factors (#8473)
* 9B - query_pre_attn_scalar = 256 not 224
See 03e657582d
Gemma-2 9B should use 256, not the 224 that self.config.hidden_size // self.config.num_attention_heads gives (see the worked sketch after the first hunk below)
* llama : fix Gemma-2 Query scaling factor
ggml-ci
---------
Co-authored-by: Daniel Han <danielhanchen@gmail.com>
This commit is contained in:
parent e236528e76
commit 73cf442e7b
@@ -2504,11 +2504,6 @@ class Gemma2Model(Model):
         )
         self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
 
-        # sanity check
-        attn_scalar = self.hparams["query_pre_attn_scalar"]
-        if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
-            raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
-
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
 
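The sanity check removed above is exactly what the 9B model violates: its published query_pre_attn_scalar equals the head dimension (256), while hidden_size // num_attention_heads gives 224. A minimal, illustration-only Python sketch; the hyperparameter values below are assumed from the published Gemma-2 configs and are not part of this commit:

# Illustration only: config values assumed from the published Gemma-2 checkpoints.
configs = {
    "gemma-2-9b":  {"hidden_size": 3584, "num_attention_heads": 16, "query_pre_attn_scalar": 256},
    "gemma-2-27b": {"hidden_size": 4608, "num_attention_heads": 32, "query_pre_attn_scalar": 144},
}

for name, cfg in configs.items():
    derived = cfg["hidden_size"] // cfg["num_attention_heads"]
    ok = derived == cfg["query_pre_attn_scalar"]
    print(f"{name}: hidden_size // num_attention_heads = {derived}, "
          f"query_pre_attn_scalar = {cfg['query_pre_attn_scalar']}, old check passes = {ok}")

# gemma-2-9b:  derived = 224, query_pre_attn_scalar = 256, old check passes = False
# gemma-2-27b: derived = 144, query_pre_attn_scalar = 144, old check passes = True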
@@ -11680,7 +11680,12 @@ struct llm_build_context {
                     ext_factor, attn_factor, beta_fast, beta_slow);
                 cb(Qcur, "Qcur", il);
 
-                Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head)));
+                // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
+                switch (model.type) {
+                    case e_model::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
+                    case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+                    default: GGML_ASSERT(false);
+                };
                 cb(Qcur, "Qcur_scaled", il);
 
                 Kcur = ggml_rope_ext(
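In effect, the query is now scaled by 1/sqrt(n_embd_head_k) = 1/sqrt(256) for the 9B model and by 1/sqrt(n_embd / n_head) = 1/sqrt(144) for the 27B model before the attention scores are computed. A hedged Python sketch of the same selection; MODEL_9B and MODEL_27B stand in for llama.cpp's e_model values, and the numeric arguments are assumed from the configs quoted above:

import math

# Sketch of the per-model query scale chosen by the switch in llm_build_context.
def gemma2_q_scale(model_type: str, n_embd: int, n_head: int, n_embd_head_k: int) -> float:
    if model_type == "MODEL_9B":
        return 1.0 / math.sqrt(n_embd_head_k)      # 9B: scale by 1/sqrt(head_dim) = 1/sqrt(256)
    if model_type == "MODEL_27B":
        return 1.0 / math.sqrt(n_embd / n_head)    # 27B: scale by 1/sqrt(hidden/heads) = 1/sqrt(144)
    raise ValueError("unhandled Gemma-2 variant")  # mirrors GGML_ASSERT(false)

print(gemma2_q_scale("MODEL_9B",  n_embd=3584, n_head=16, n_embd_head_k=256))  # 0.0625
print(gemma2_q_scale("MODEL_27B", n_embd=4608, n_head=32, n_embd_head_k=128))  # ~0.0833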