Commit d298382
main: replace --no-special with --special (#7534)
This also flips the default behavior of the output: control tokens are no longer included by default.
1 parent 32a2821 · commit d298382
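
For illustration, the old --no-special flag was an opt-out (control tokens were printed unless suppressed), while the new --special flag is an opt-in (control tokens are hidden unless requested). A self-contained sketch of that inversion as a hypothetical stand-alone program, not llama.cpp's actual argument parser:

    // Hypothetical sketch of the opt-out -> opt-in flip; not llama.cpp code.
    #include <cstdio>
    #include <cstring>

    int main(int argc, char ** argv) {
        bool special = false; // new default: control token output disabled
        for (int i = 1; i < argc; ++i) {
            if (std::strcmp(argv[i], "--special") == 0) {
                special = true; // opt in, mirroring params.special = true
            }
        }
        std::printf("control tokens %s\n", special ? "shown" : "hidden");
        return 0;
    }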

File tree

3 files changed: +6, -12 lines

common/common.cpp

Lines changed: 3 additions & 3 deletions
@@ -904,8 +904,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.interactive_specials = true;
         return true;
     }
-    if (arg == "--no-special") {
-        params.no_special = true;
+    if (arg == "--special") {
+        params.special = true;
         return true;
     }
     if (arg == "--embedding") {
@@ -1366,9 +1366,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     printf(" -h, --help show this help message and exit\n");
     printf(" --version show version and build info\n");
     printf(" -i, --interactive run in interactive mode\n");
+    printf(" --special special tokens output enabled\n");
     printf(" --interactive-specials allow special tokens in user text, in interactive mode\n");
     printf(" --interactive-first run in interactive mode and wait for input right away\n");
-    printf(" --no-special control tokens output disabled\n");
     printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
     printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
     printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n");

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ struct gpt_params {
     bool use_color = false; // use color to distinguish generations and inputs
     bool interactive = false; // interactive mode
     bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
-    bool no_special = false; // disable control token output
+    bool special = false; // enable special token output
     bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all = false; // save user input and generations to prompt cache
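
The renamed field keeps a false default, but the meaning inverts: false now suppresses control token output instead of enabling it. A minimal sketch of the field in use, assuming common.h is included and the code sits inside some function:

    // Sketch only: illustrates the new default of the renamed field.
    gpt_params params;     // params.special == false -> control tokens hidden
    params.special = true; // equivalent to passing --special on the CLI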

examples/main/main.cpp

Lines changed: 2 additions & 8 deletions
@@ -740,16 +740,10 @@ int main(int argc, char ** argv) {
         // display text
         if (input_echo && display) {
             for (auto id : embd) {
-                const std::string token_str = llama_token_to_piece(ctx, id);
+                const std::string token_str = llama_token_to_piece(ctx, id, params.special);

                 // Console/Stream Output
-                if (!llama_token_is_control(llama_get_model(ctx), id)) {
-                    // Stream Output Token To Standard Output
-                    fprintf(stdout, "%s", token_str.c_str());
-                } else if (!params.no_special && !params.conversation) {
-                    // Stream Control Token To Standard Output Stream
-                    fprintf(stdout, "%s", token_str.c_str());
-                }
+                fprintf(stdout, "%s", token_str.c_str());

                 // Record Displayed Tokens To Log
                 // Note: Generated tokens are created one by one hence this check
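
The deleted branch decided at print time whether a token was a control token; the new code delegates that to llama_token_to_piece, whose third argument controls whether special tokens are rendered as text at all. A minimal sketch of the resulting display path, assuming ctx, embd, and params are in scope as in main.cpp:

    // With params.special == false (the new default), special tokens render
    // as empty pieces, so a single unconditional fprintf suffices.
    for (auto id : embd) {
        const std::string token_str = llama_token_to_piece(ctx, id, params.special);
        fprintf(stdout, "%s", token_str.c_str());
    }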
