#include "llama-hparams.h"
|
|
|
|
#include "ggml.h"
|
|
|
|
uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}

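// ratio of query heads to key/value heads, i.e. the grouped-query attention factor;
// e.g. n_head = 32 with n_head_kv = 8 gives n_gqa = 4 query heads per k/v head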
uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}

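// dimension of the key embeddings across all k/v heads (per-layer width of the K cache)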
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

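// dimension of the value embeddings across all k/v heads (per-layer width of the V cache)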
uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}

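// per-layer size of the recurrent "K" state:
// the token-shift states for RWKV models, or the convolution state for Mamba-like SSM models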
uint32_t llama_hparams::n_embd_k_s() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
}

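// per-layer size of the recurrent "V" state:
// the wkv_states for RWKV models, or the ssm_states for Mamba-like SSM models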
uint32_t llama_hparams::n_embd_v_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}