Adding Gemma 2 2B configs (#8784)
* Adding Gemma 2 2B configs

  Updates the Q scaling and the Gemma 2 model sizes to match the v2 2B model.

* Update src/llama.cpp

Co-authored-by: slaren <slarengh@gmail.com>
This commit is contained in:
parent 44d28ddd5c
commit 398ede5efe
@@ -4969,6 +4969,7 @@ static void llm_load_hparams(
                 hparams.attn_soft_cap = true;

                 switch (hparams.n_layer) {
+                    case 26: model.type = e_model::MODEL_2B; break;
                     case 42: model.type = e_model::MODEL_9B; break;
                     case 46: model.type = e_model::MODEL_27B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
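For readers unfamiliar with llm_load_hparams: this hunk simply keys the Gemma 2 model size off the layer count of the loaded checkpoint, adding 26 layers as the 2B variant. Below is a minimal standalone sketch of the same mapping; the enum and function names (gemma2_size, classify_gemma2) are illustrative only, not llama.cpp's internal e_model machinery.

// Standalone sketch (not llama.cpp source) of mapping layer count -> Gemma 2 size.
#include <cstdio>

enum class gemma2_size { unknown, b2, b9, b27 };

static gemma2_size classify_gemma2(int n_layer) {
    switch (n_layer) {
        case 26: return gemma2_size::b2;   // Gemma 2 2B  -> 26 transformer layers (added by this commit)
        case 42: return gemma2_size::b9;   // Gemma 2 9B  -> 42 transformer layers
        case 46: return gemma2_size::b27;  // Gemma 2 27B -> 46 transformer layers
        default: return gemma2_size::unknown;
    }
}

int main() {
    const int layers[] = { 26, 42, 46, 30 };
    for (int n : layers) {
        std::printf("n_layer=%d -> size id %d\n", n, static_cast<int>(classify_gemma2(n)));
    }
    return 0;
}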
@@ -11736,6 +11737,7 @@ struct llm_build_context {

                // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
                switch (model.type) {
+                   case e_model::MODEL_2B:
                    case e_model::MODEL_9B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); break;
                    case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
                    default: GGML_ABORT("fatal error");
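The notable detail in this second hunk is that the new MODEL_2B case deliberately falls through to the MODEL_9B branch: both scale the attention query by 1/sqrt(n_embd_head_k) (the head dimension), while the 27B model scales by 1/sqrt(n_embd / n_head), following the referenced gemma_pytorch commit. A minimal standalone sketch of that rule follows; the helper name and the example dimensions are illustrative assumptions, not values taken from llama.cpp.

// Standalone sketch of the query-scaling rule encoded by the hunk above.
#include <cmath>
#include <cstdio>

enum class gemma2_size { b2, b9, b27 };

static float q_scale(gemma2_size size, int n_embd_head_k, int n_embd, int n_head) {
    switch (size) {
        case gemma2_size::b2:   // falls through: 2B uses the same rule as 9B
        case gemma2_size::b9:   return 1.0f / std::sqrt(float(n_embd_head_k));
        case gemma2_size::b27:  return 1.0f / std::sqrt(float(n_embd / n_head)); // integer division, as in the diff
    }
    return 1.0f;
}

int main() {
    // Hypothetical dimensions, chosen only to show the two formulas can disagree
    // whenever head_dim * n_head != n_embd.
    std::printf("9B-style scale : %f\n", q_scale(gemma2_size::b9, 256, 3584, 16));
    std::printf("27B-style scale: %f\n", q_scale(gemma2_size::b27, 128, 4608, 32));
    return 0;
}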