fixup! Initial OpenELM support (270M only so far)

Fix formatting
Icecream95 2024-05-18 20:19:10 +12:00
parent aaabe2e361
commit 60b2e1b9c5


@@ -6115,8 +6115,6 @@ static bool llm_load_tensors(
                 }
             } break;
         case LLM_ARCH_OPENELM:
-            {
-            {
             {
                 std::vector<int> num_kv_heads = {3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5};
                 std::vector<int> num_query_heads = {12, 12, 12, 12, 12, 16, 16, 16, 16, 16, 16, 16, 20, 20, 20, 20};
@@ -6157,8 +6155,6 @@ static bool llm_load_tensors(
                     layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * ffn_inter });
                     layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { ffn_inter, n_embd });
                 }
-            }
-            }
             } break;
         default:
             throw std::runtime_error("unknown architecture");