Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 06:39:25 +01:00)
llama : fix qs.n_attention_wv for DeepSeek-V2 (#9156)
commit 78eb487bb0 (parent a77feb5d71)
@@ -16822,7 +16822,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     // TODO: avoid hardcoded tensor names - use the TN_* constants
     if (name.find("attn_v.weight") != std::string::npos ||
-        name.find("attn_qkv.weight") != std::string::npos) {
+        name.find("attn_qkv.weight") != std::string::npos ||
+        name.find("attn_kv_b.weight")!= std::string::npos) {
         ++qs.n_attention_wv;
     } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
         qs.has_output = true;
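For context: DeepSeek-V2 uses multi-head latent attention, so its per-layer value projection is stored as attn_kv_b.weight rather than attn_v.weight, and before this change those layers were not counted toward qs.n_attention_wv during quantization. Below is a minimal standalone sketch of the corrected counting logic. The tensor-name substrings come from the diff above; the count_attention_wv helper and the sample name list are illustrative, not part of the llama.cpp API.

// sketch.cpp - standalone illustration of the patched counting condition
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical helper mirroring the condition in the diff: count tensors
// that hold a per-layer attention value (or fused KV) projection.
static int count_attention_wv(const std::vector<std::string> & tensor_names) {
    int n_attention_wv = 0;
    for (const std::string & name : tensor_names) {
        if (name.find("attn_v.weight")    != std::string::npos ||
            name.find("attn_qkv.weight")  != std::string::npos ||
            name.find("attn_kv_b.weight") != std::string::npos) { // added by the fix
            ++n_attention_wv;
        }
    }
    return n_attention_wv;
}

int main() {
    // Illustrative tensor names: a classic attention layer and a
    // DeepSeek-V2-style layer that only has attn_kv_b.weight.
    const std::vector<std::string> names = {
        "blk.0.attn_v.weight",       // counted before and after the fix
        "blk.1.attn_kv_b.weight",    // counted only after the fix
        "blk.1.attn_kv_a_mqa.weight" // never counted
    };
    std::printf("n_attention_wv = %d\n", count_attention_wv(names)); // prints 2
    return 0;
}

With the fix, the count matches the model's actual number of attention layers for DeepSeek-V2, which is what the quantizer's per-layer type selection relies on.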