From d70a267804381d2a4c0d4f69f45a201cfb9619a8 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 2 May 2025 18:58:28 +0300
Subject: [PATCH] context : fix reorder logic

ggml-ci
---
 src/llama-context.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index a88b9a5ff90da..45591be992d87 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -1050,6 +1050,9 @@ int llama_context::decode(llama_batch & inp_batch) {
     // finalize the batch processing
     kv_guard.commit();
 
+    // set to total number of outputs in the batch, for use in llama_get_logits_ith
+    n_outputs = n_outputs_all;
+
     // set output mappings
     {
         bool sorted_output = true;
@@ -1103,9 +1106,6 @@ int llama_context::decode(llama_batch & inp_batch) {
         }
     }
 
-    // set to total number of outputs in the batch, for use in llama_get_logits_ith
-    n_outputs = n_outputs_all;
-
     // wait for the computation to finish (automatically done when obtaining the model output)
     //synchronize();