From 1725de768eec19d383195cc430b87f463cd2d64b Mon Sep 17 00:00:00 2001
From: Francis Couture-Harpin
Date: Wed, 17 Jul 2024 15:36:56 -0400
Subject: [PATCH] llama : fix t5 segfault

---
 src/llama.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 738f6d3af..9aae33e00 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -15377,11 +15377,13 @@ static int llama_encode_internal(
 
                 ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_tokens*n_embd*sizeof(float));
 
+                GGML_ASSERT(!ubatch.equal_seqs); // TODO: use batch.n_seqs instead of failing
+
                 // remember the sequence ids used during the encoding - needed for cross attention later
                 lctx.seq_ids_enc.resize(n_tokens);
                 for (uint32_t i = 0; i < n_tokens; i++) {
-                    for (int s = 0; s < batch.n_seq_id[i]; s++) {
-                        llama_seq_id seq_id = batch.seq_id[i][s];
+                    for (int s = 0; s < ubatch.n_seq_id[i]; s++) {
+                        llama_seq_id seq_id = ubatch.seq_id[i][s];
                         lctx.seq_ids_enc[i].insert(seq_id);
                     }
                 }