llama : minor style

Georgi Gerganov 2024-09-02 11:52:04 +03:00
parent 8f1d81a0b6
commit c6d4cb4655
No known key found for this signature in database
GPG Key ID: 449E073F9DC10735


@@ -8711,8 +8711,7 @@ static void llm_build_kv_store(
     GGML_ASSERT(kv.size == n_ctx);
 
-    struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa,
-            (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head);
+    struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa, ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa)*kv_head);
     cb(k_cache_view, "k_cache_view", il);
 
     // note: storing RoPE-ed version of K in the KV cache
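
For context on the addressing in this hunk: ggml_view_1d(ctx, t, ne0, offset) takes a number of elements and a byte offset, and ggml_row_size(type, ne) returns how many bytes ne elements of a given type occupy, so row_size * kv_head places the view at the first cache slot to be written. Below is a minimal, self-contained sketch of the same pattern; the tensor type and the toy sizes (n_embd_k_gqa, n_ctx, n_tokens, kv_head) are assumptions for illustration, not values taken from llama.cpp.

    // sketch only: reproduces the k_cache_view addressing with made-up sizes
    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        const int64_t n_embd_k_gqa = 128; // assumed per-token K row width
        const int64_t n_ctx        = 512; // assumed cache capacity in tokens
        const int64_t n_tokens     = 32;  // assumed batch size
        const int64_t kv_head      = 64;  // assumed first cache slot to write

        // stand-in for kv.k_l[il]: one contiguous buffer holding n_ctx rows
        struct ggml_tensor * k_l = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_embd_k_gqa*n_ctx);

        // the view covers n_tokens rows, starting kv_head rows into the buffer;
        // ggml_row_size(type, n_embd_k_gqa) is the byte size of one row
        struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, n_tokens*n_embd_k_gqa,
                ggml_row_size(k_l->type, n_embd_k_gqa)*kv_head);

        GGML_ASSERT(k_cache_view->ne[0] == n_tokens*n_embd_k_gqa);

        ggml_free(ctx);
        return 0;
    }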
@@ -8723,8 +8722,7 @@ static void llm_build_kv_store(
     struct ggml_tensor * v_cache_view = nullptr;
 
     if (cparams.flash_attn) {
-        v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa,
-                (kv_head)*ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa));
+        v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa, ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa)*kv_head);
     } else {
         // note: the V cache is transposed when not using flash attention
         v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
@@ -9211,8 +9209,7 @@ static struct ggml_tensor * llm_build_kv(
     struct ggml_tensor * cur;
 
-    cur = llm_build_kqv(ctx, lctx, kv, graph, wo, wo_b,
-            q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il);
+    cur = llm_build_kqv(ctx, lctx, kv, graph, wo, wo_b, q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il);
     cb(cur, "kqv_out", il);
 
     return cur;