From 0aed791064ed6a513daf5bf5e78fe637b295e5ee Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 12 Jun 2023 20:56:06 +0300
Subject: [PATCH] llama : do a warm-up eval at start for better timings

---
 examples/main/main.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 66d563143a5c6..efa913e165f6c 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -331,6 +331,13 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd;
 
+    // do one empty run to warm up the model
+    {
+        const std::vector<llama_token> tmp = { llama_token_bos(), };
+        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
+        llama_reset_timings(ctx);
+    }
+
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
         if (embd.size() > 0) {