Commit 36376ab

server : --n-predict option document and cap to max value (#5549)
* server: document --n-predict
* server: ensure client request cannot override n_predict if set
* server: fix print usage LF in new --n-predict option
1 parent 66c1968 · commit 36376ab
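In effect (values illustrative): if the server is started with `-n 128` and a client sends a request containing `"n_predict": 512`, the slot is clamped to 128 tokens and a warning is logged; a request asking for 64 tokens is left untouched.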

File tree: 2 files changed (+15 lines, -1 line)

- examples/server/README.md
- examples/server/server.cpp

examples/server/README.md

Lines changed: 1 addition & 0 deletions
@@ -39,6 +39,7 @@ see https://github.com/ggerganov/llama.cpp/issues/1437
 - `--mmproj MMPROJ_FILE`: Path to a multimodal projector file for LLaVA.
 - `--grp-attn-n`: Set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`
 - `--grp-attn-w`: Set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`
+- `-n, --n-predict`: Set the maximum tokens to predict (default: -1)
 
 ## Build
 
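Usage note (illustrative, not part of the commit): with the new flag, the server could be launched as, for example, `./server -m models/7B/ggml-model.gguf -n 128` to cap every completion at 128 predicted tokens; the default of -1 leaves no server-side cap, and clients may always request fewer tokens than the cap.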

examples/server/server.cpp

Lines changed: 14 additions & 1 deletion
@@ -159,6 +159,7 @@ struct llama_client_slot
     int32_t n_decoded   = 0;
     int32_t n_remaining = -1;
     int32_t i_batch     = -1;
+    int32_t n_predict   = -1;
 
     int32_t num_prompt_tokens           = 0;
     int32_t num_prompt_tokens_processed = 0;
@@ -410,6 +411,7 @@ struct llama_server_context
 
             slot.id = i;
             slot.n_ctx = n_ctx_slot;
+            slot.n_predict = params.n_predict;
 
             LOG_TEE(" -> Slot %i - max context: %i\n", slot.id, n_ctx_slot);
 
@@ -546,6 +548,15 @@ struct llama_server_context
         slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
         slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
 
+        if (slot->n_predict > 0 && slot->params.n_predict > slot->n_predict) {
+            // Might be better to reject the request with a 400 ?
+            LOG_WARNING("Max tokens to predict exceeds server configuration", {
+                {"params.n_predict", slot->params.n_predict},
+                {"slot.n_predict", slot->n_predict},
+            });
+            slot->params.n_predict = slot->n_predict;
+        }
+
         // infill
         if (data.count("input_prefix") != 0)
         {
@@ -1053,6 +1064,7 @@ struct llama_server_context
 
         return json {
             {"n_ctx",             slot.n_ctx},
+            {"n_predict",         slot.n_predict},
             {"model",             params.model_alias},
             {"seed",              slot.params.seed},
             {"temperature",       slot.sparams.temp},
@@ -1915,13 +1927,14 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     printf("  --mmproj MMPROJ_FILE  path to a multimodal projector file for LLaVA.\n");
     printf("  --log-disable         disables logging to a file.\n");
     printf("\n");
+    printf("  -n, --n-predict       maximum tokens to predict (default: %d)\n", params.n_predict);
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("                        advanced option to override model metadata by key. may be specified multiple times.\n");
     printf("                        types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
     printf("  -gan N, --grp-attn-n N set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`");
     printf("  -gaw N, --grp-attn-w N set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`");
     printf("  --chat-template FORMAT_NAME");
-    printf("                        set chat template, possible valus is: llama2, chatml (default %s)", sparams.chat_template.c_str());
+    printf("                        set chat template, possible value is: llama2, chatml (default %s)", sparams.chat_template.c_str());
     printf("\n");
 }
 
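For readers skimming the hunks above, the following is a minimal standalone sketch of the capping rule this commit adds when a slot is configured from a request. Everything here is illustrative (the struct and function names are made up, and `fprintf` stands in for the server's `LOG_WARNING`); only the comparison and clamp mirror the diff.

```cpp
// Standalone sketch of the n_predict capping rule added by this commit.
// Names are illustrative; only the comparison/clamp mirrors the diff.
#include <cstdint>
#include <cstdio>

struct slot_params {
    int32_t n_predict = -1;      // per-request limit (-1 = no request-side limit)
};

struct client_slot {
    int32_t     n_predict = -1;  // server-side cap, filled from params.n_predict (-1 = uncapped)
    slot_params params;
};

// Same rule as the diff: if a positive server cap exists and the request asks
// for more, warn and clamp the request down to the cap.
static void apply_n_predict_cap(client_slot & slot, int32_t requested) {
    slot.params.n_predict = requested;
    if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) {
        std::fprintf(stderr,
                     "warn: requested n_predict %d exceeds server limit %d, clamping\n",
                     slot.params.n_predict, slot.n_predict);
        slot.params.n_predict = slot.n_predict;
    }
}

int main() {
    client_slot slot;
    slot.n_predict = 128;            // as if the server was started with -n 128

    apply_n_predict_cap(slot, 512);  // client asks for 512 -> clamped to 128
    std::printf("effective n_predict: %d\n", slot.params.n_predict);

    apply_n_predict_cap(slot, 32);   // client asks for 32  -> kept at 32
    std::printf("effective n_predict: %d\n", slot.params.n_predict);
    return 0;
}
```

As the in-diff comment notes, rejecting such a request with an HTTP 400 instead of clamping would be an alternative design; the commit opts for clamping.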
