llama : do not allocate KV cache for "vocab_only == true" (#682)

Fixes sanitizer CI
This commit is contained in:
Stephan Walter 2023-04-02 07:18:53 +00:00 committed by GitHub
parent c4f89d8d73
commit 81040f10aa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -1608,7 +1608,7 @@ struct llama_context * llama_init_from_file(
     }
     // reserve memory for context buffers
-    {
+    if (!params.vocab_only) {
         if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) {
             fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
             llama_free(ctx);