fixes #7999 (adds control vectors to all build_XXX() functions in llama.cpp) [needs testing] (#8060)

* fixes #7999

The `build_command_r` function was missing the control vector addition.

* Fixed qwen2 too

* Fixed all models' control vectors

* Removed double calls to `cb(cur, "l_out", il)`

* Moved control vector logic to `llama_control_vector::apply_to()`
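
Why the helper: every `build_XXX()` previously repeated the same lookup-and-add, and several models either omitted it or called `cb(cur, "l_out", il)` twice. A minimal sketch of the call-site change, using only code that appears in the diff below:

```cpp
// Before: each build_XXX() duplicated the per-layer lookup and add.
ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
if (layer_dir != nullptr) {
    cur = ggml_add(ctx0, cur, layer_dir);
}

// After: a single call that is a no-op when no control vector covers layer il.
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);
```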
jukofyork 2024-06-25 21:47:40 +01:00 committed by GitHub
parent 6fcbf68235
commit 163d50adaf

llama.cpp (112 changes)

@@ -2368,13 +2368,21 @@ struct llama_control_vector {
     int32_t layer_start = -1;
     int32_t layer_end   = -1;
 
-    ggml_tensor * tensor_for(int il) const {
+    struct ggml_tensor * tensor_for(int il) const {
         if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
             return nullptr;
         }
         return tensors[il];
     }
 
+    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
+        ggml_tensor * layer_dir = tensor_for(il);
+        if (layer_dir != nullptr) {
+            cur = ggml_add(ctx, cur, layer_dir);
+        }
+        return cur;
+    }
+
     ~llama_control_vector() {
         for (struct ggml_context * ctx : ctxs) {
             ggml_free(ctx);
@@ -8023,10 +8031,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);
 
-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@ -8141,6 +8146,7 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@ -8245,6 +8251,7 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@ -8360,9 +8367,8 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "l_out", il);
cur = ggml_add(ctx0, cur, inpL); cur = ggml_add(ctx0, cur, inpL);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@ -8514,10 +8520,7 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "ffn_out", il); cb(cur, "ffn_out", il);
ggml_tensor * layer_dir = lctx.cvec.tensor_for(il); cur = lctx.cvec.apply_to(ctx0, cur, il);
if (layer_dir != nullptr) {
cur = ggml_add(ctx0, cur, layer_dir);
}
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@@ -8648,10 +8651,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);
 
-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -8757,8 +8757,12 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }
 
-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }
 
         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -8846,6 +8850,7 @@ struct llm_build_context {
             }
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -9141,8 +9146,12 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }
 
-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }
 
         cur = llm_build_norm(ctx0, inpL, hparams,
@ -9276,6 +9285,7 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@ -9424,6 +9434,7 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@ -9536,6 +9547,7 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, ffn_inp); cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@@ -9647,6 +9659,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -9792,6 +9805,7 @@ struct llm_build_context {
             }
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -9912,11 +9926,11 @@ struct llm_build_context {
             }
 
             cur = ggml_add(ctx0, cur, ffn_output);
-            cb(cur, "l_out", il);
-
             cur = ggml_add(ctx0, cur, inpL);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
+            // input for next layer
             inpL = cur;
         }
@@ -10048,8 +10062,10 @@ struct llm_build_context {
             }
 
             cur = ggml_add(ctx0, residual, cur);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
+            // input for next layer
             inpL = cur;
         }
@@ -10148,9 +10164,8 @@ struct llm_build_context {
             }
 
             cur = ggml_add(ctx0, cur, sa_out);
-            cb(cur, "l_out", il);
-
             cur = ggml_add(ctx0, cur, inpL);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -10256,8 +10271,12 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }
 
-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }
 
         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -10363,8 +10382,12 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }
 
-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }
 
         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -10476,6 +10499,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -10593,6 +10617,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -10734,6 +10759,7 @@ struct llm_build_context {
             cb(cur, "hidden_scaled_ffn", -1);
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@ -10846,6 +10872,7 @@ struct llm_build_context {
} }
cur = ggml_add(ctx0, cur, sa_out); cur = ggml_add(ctx0, cur, sa_out);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il); cb(cur, "l_out", il);
// input for next layer // input for next layer
@@ -10962,7 +10989,9 @@ struct llm_build_context {
                     NULL,
                     LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
             cb(cur, "ffn_out", il);
+
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -11111,6 +11140,7 @@ struct llm_build_context {
             // residual
             cur = ggml_add(ctx0, cur, inpL);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -11252,6 +11282,7 @@ struct llm_build_context {
             // add together residual + FFN + self-attention
             cur = ggml_add(ctx0, cur, inpL);
             cur = ggml_add(ctx0, cur, attn_out);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -11387,10 +11418,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);
 
-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -11504,8 +11532,12 @@ struct llm_build_context {
                 cur = ggml_add(ctx0, cur, inpL);
                 cb(cur, "ffn_out", il);
 
-                inpL = ggml_add(ctx0, cur, attn_out);
-                cb(inpL, "l_out", il);
+                cur = ggml_add(ctx0, cur, attn_out);
+                cur = lctx.cvec.apply_to(ctx0, cur, il);
+                cb(cur, "l_out", il);
+
+                // input for next layer
+                inpL = cur;
             } else {
                 // attention and ffn are computed sequentially
                 // x = x + attn(ln1(x))
@@ -11528,8 +11560,12 @@ struct llm_build_context {
                     LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                 cb(cur, "ffn_out", il);
 
-                inpL = ggml_add(ctx0, cur, ffn_inp);
-                cb(inpL, "l_out", il);
+                cur = ggml_add(ctx0, cur, ffn_inp);
+                cur = lctx.cvec.apply_to(ctx0, cur, il);
+                cb(cur, "l_out", il);
+
+                // input for next layer
+                inpL = cur;
             }
         }
@@ -11656,10 +11692,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_out);
             cb(cur, "ffn_out", il);
 
-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
@@ -11892,6 +11925,7 @@ struct llm_build_context {
             }
 
             cur = ggml_add(ctx0, cur, ffn_inp);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);
 
             // input for next layer
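
For context, here is a rough sketch of how a control vector reaches `lctx.cvec` in the first place. It assumes the `llama_control_vector_apply()` entry point from `llama.h` and the `llama_control_vector_load()` helper from `common` as they existed around this commit; the `set_control_vector()` wrapper itself is hypothetical, for illustration only:

```cpp
#include "common.h"
#include "llama.h"

// Hypothetical wrapper: load a control vector from a GGUF file and register it
// for layers [il_start, il_end]. Once applied, every build_XXX() graph adds it
// per layer through lctx.cvec.apply_to(ctx0, cur, il).
static bool set_control_vector(llama_context * ctx, const llama_model * model,
                               const std::string & fname, int32_t il_start, int32_t il_end) {
    // {strength, fname}: common's loader scales and sums one or more vectors.
    const auto cvec = llama_control_vector_load({{1.0f, fname}});
    if (cvec.n_embd == -1) {
        return false; // failed to load, or the vectors were inconsistent
    }
    return llama_control_vector_apply(ctx, cvec.data.data(), cvec.data.size(),
                                      llama_n_embd(model), il_start, il_end) == 0;
}
```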