Mirror of https://github.com/ggerganov/llama.cpp.git
llama : fix build

ggml-ci

Commit: 7864a2cd9b
Parent: 3ce30e07c9
Changed file: llama.h (4 lines)
@@ -191,8 +191,8 @@ extern "C" {
     float    yarn_beta_slow;  // YaRN high correction dim
     uint32_t yarn_orig_ctx;   // YaRN original context size

-    ggml_type type_k; // data type for K cache
-    ggml_type type_v; // data type for V cache
+    enum ggml_type type_k; // data type for K cache
+    enum ggml_type type_v; // data type for V cache

     // Keep the booleans together to avoid misalignment during copy-by-value.
     bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
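Why this fixes the build: llama.h is consumed by plain C code as well as C++, and in C an enum tag is not a type name by itself, so `ggml_type type_k;` only compiles where a typedef (or C++) makes the bare name available; writing `enum ggml_type` works in both languages. A minimal stand-alone sketch of the distinction, using an illustrative enum and struct rather than the real ggml.h declarations:

/* enum_tag_demo.c -- illustrative only, not part of llama.cpp.
 * Compile with a C compiler (e.g. cc enum_tag_demo.c) to see that the
 * `enum` keyword is required when no typedef is provided. */
#include <stdio.h>

enum ggml_type { GGML_TYPE_F32, GGML_TYPE_F16 };  /* tag only, no typedef */

struct params {
    /* ggml_type type_k;       <-- error in C: unknown type name      */
    enum ggml_type type_k;     /* OK in both C and C++                */
};

int main(void) {
    struct params p = { GGML_TYPE_F16 };
    printf("type_k = %d\n", (int)p.type_k);
    return 0;
}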