From 5418932b718033f12ec75cbdedf0be1d2c4deaf2 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 3 Oct 2023 21:01:45 +0300
Subject: [PATCH] llama : fix comments for llama_kv_cache API

---
 llama.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llama.h b/llama.h
index c4333d652..cf52cb6d6 100644
--- a/llama.h
+++ b/llama.h
@@ -330,16 +330,16 @@ extern "C" {
             "avoid using this, it will be removed in the future, instead - count the tokens in user code");
 
     // Remove all tokens data of cells in [c0, c1)
-    // c0 < -1 : [0, c1]
-    // c1 < -1 : [c0, inf)
+    // c0 < 0 : [0, c1]
+    // c1 < 0 : [c0, inf)
     LLAMA_API void llama_kv_cache_tokens_rm(
             struct llama_context * ctx,
                            int32_t   c0,
                            int32_t   c1);
 
     // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
-    // p0 < -1 : [0, p1]
-    // p1 < -1 : [p0, inf)
+    // p0 < 0 : [0, p1]
+    // p1 < 0 : [p0, inf)
     LLAMA_API void llama_kv_cache_seq_rm(
             struct llama_context * ctx,
                     llama_seq_id   seq_id,
@@ -348,8 +348,8 @@ extern "C" {
 
     // Copy all tokens that belong to the specified sequence to another sequence
     // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
-    // p0 < -1 : [0, p1]
-    // p1 < -1 : [p0, inf)
+    // p0 < 0 : [0, p1]
+    // p1 < 0 : [p0, inf)
     LLAMA_API void llama_kv_cache_seq_cp(
             struct llama_context * ctx,
                     llama_seq_id   seq_id_src,
@@ -364,8 +364,8 @@ extern "C" {
 
     // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
    // If the KV cache is RoPEd, the KV data is updated accordingly
-    // p0 < -1 : [0, p1]
-    // p1 < -1 : [p0, inf)
+    // p0 < 0 : [0, p1]
+    // p1 < 0 : [p0, inf)
     LLAMA_API void llama_kv_cache_seq_shift(
             struct llama_context * ctx,
                     llama_seq_id   seq_id,
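
For reference, a minimal usage sketch of the range semantics the corrected comments describe. The trailing parameters of llama_kv_cache_seq_cp and llama_kv_cache_seq_shift are not fully visible in the truncated hunks above and are assumed from the comments; the sequence ids and positions are illustrative only, and `ctx` is assumed to be a valid llama_context created elsewhere.

    #include "llama.h"

    // Sketch only: demonstrates the negative-bound convention documented above.
    static void kv_cache_example(struct llama_context * ctx) {
        // Drop every cached token of sequence 1:
        // p0 < 0 selects [0, p1] and p1 < 0 selects [p0, inf), so (-1, -1) covers the whole sequence.
        llama_kv_cache_seq_rm(ctx, 1, -1, -1);

        // Tag the cells of sequence 0 in positions [0, 32) with sequence id 2 as well;
        // per the comment, this allocates no extra KV cache memory.
        llama_kv_cache_seq_cp(ctx, 0, 2, 0, 32);

        // Shift sequence 0 positions in [32, inf) by -16 (p1 = -1 means "to the end");
        // if the KV cache is RoPEd, the KV data is updated accordingly.
        llama_kv_cache_seq_shift(ctx, 0, 32, -1, -16);
    }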