fixup! Initial OpenELM support (270M only so far)
Fix formatting
This commit is contained in:
parent aaabe2e361
commit 60b2e1b9c5
@@ -6115,8 +6115,6 @@ static bool llm_load_tensors(
                     }
                 } break;
             case LLM_ARCH_OPENELM:
                 {
-                {
-                    {
                     std::vector<int> num_kv_heads = {3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5};
                     std::vector<int> num_query_heads = {12, 12, 12, 12, 12, 16, 16, 16, 16, 16, 16, 16, 20, 20, 20, 20};
@@ -6157,8 +6155,6 @@ static bool llm_load_tensors(
                         layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * ffn_inter });
                         layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { ffn_inter, n_embd });
                     }
-                    }
-                }
                 } break;
             default:
                 throw std::runtime_error("unknown architecture");
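For readers skimming the diff: the two hard-coded vectors encode per-layer attention head counts for the 16 layers of OpenELM-270M (3 to 5 KV heads and 12 to 20 query heads, growing with depth). Below is a minimal, standalone sketch of how such per-layer counts could determine projection shapes; the head dimension of 64, the embedding width of 1280, and all names in it are illustrative assumptions, not code from this commit.

// Standalone illustration only (not part of the commit): shows how per-layer
// head counts like the hard-coded OpenELM-270M tables above could translate
// into per-layer Q/K/V projection shapes. head_dim = 64 and n_embd = 1280 are
// assumed values for a 270M-class config, not taken from this diff.
#include <cstdio>
#include <vector>

int main() {
    const int head_dim = 64;   // assumed per-head dimension
    const int n_embd   = 1280; // assumed embedding width

    const std::vector<int> num_kv_heads    = {3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5};
    const std::vector<int> num_query_heads = {12, 12, 12, 12, 12, 16, 16, 16, 16, 16, 16, 16, 20, 20, 20, 20};

    for (size_t i = 0; i < num_kv_heads.size(); ++i) {
        const int q_rows  = num_query_heads[i] * head_dim; // output rows of the query projection
        const int kv_rows = num_kv_heads[i]    * head_dim; // output rows of each key/value projection
        std::printf("layer %2zu: wq %d x %d, wk/wv %d x %d\n", i, n_embd, q_rows, n_embd, kv_rows);
    }
    return 0;
}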