From 739b85c98564a2f48678550f3ab5b6da7302c6bc Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Sun, 29 Oct 2023 11:25:32 +0200
Subject: [PATCH] llama : try to fix build

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index d9c4fb3e3..ac359da69 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5544,7 +5544,7 @@ static struct ggml_cgraph * llama_build_graph(
     // should we offload the final norm? yes if we are not computing embeddings
     const bool offload_emb = lctx.embedding.empty();
 
-    static const std::unordered_map<llm_offload_func_e, std::string> k_offload_func_name = {
+    static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
         { OFFLOAD_FUNC_NOP, "CPU" },
 #ifdef GGML_USE_CUBLAS
         { OFFLOAD_FUNC,     "GPU (CUDA)" },
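
Note on the fix (commentary, not part of the patch; the template arguments in the
declaration above are reconstructed, so the rationale here is a plausible reading,
not a confirmed one): pre-C++14 standard libraries are not required to specialize
std::hash for enumeration types (LWG issue 2148), so an unordered_map keyed
directly on llm_offload_func_e can fail to build on older GCC and MSVC toolchains.
Supplying std::hash<int> as the third template argument sidesteps this, because an
unscoped enum converts implicitly to int. The self-contained sketch below, with a
trimmed stand-in for the enum defined elsewhere in llama.cpp, shows the pattern:

    // Sketch only, assuming the reconstructed declaration in the patch above.
    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Trimmed stand-in for the llm_offload_func_e enum from llama.cpp.
    enum llm_offload_func_e {
        OFFLOAD_FUNC_NOP,
        OFFLOAD_FUNC,
    };

    // Pre-C++14 libraries need not provide std::hash for enum types, so
    //     std::unordered_map<llm_offload_func_e, std::string> m;
    // may fail to compile there. Hashing through std::hash<int> is portable,
    // since the unscoped enum key converts implicitly to int:
    static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
        { OFFLOAD_FUNC_NOP, "CPU"        },
        { OFFLOAD_FUNC,     "GPU (CUDA)" },
    };

    int main() {
        std::printf("%s\n", k_offload_func_name.at(OFFLOAD_FUNC).c_str());
        return 0;
    }

Built with e.g. g++ -std=c++11 sketch.cpp, this compiles even where
std::hash<llm_offload_func_e> is unavailable, and prints "GPU (CUDA)".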