From d298382ad977ec89c8de7b57459b9d7965d2c272 Mon Sep 17 00:00:00 2001
From: Brian <mofosyne@gmail.com>
Date: Mon, 27 May 2024 00:10:17 +1000
Subject: [PATCH] main: replace --no-special with --special (#7534)

This also flips the default behavior so that control tokens are no longer included in the output by default.
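
Control-token rendering is now delegated to llama_token_to_piece(), whose
third argument selects whether special tokens are rendered. A minimal sketch
of the new output path (the names ctx, id, and params follow
examples/main/main.cpp at this commit):

    // params.special defaults to false, so control tokens are hidden
    // unless the user passes --special on the command line.
    const std::string token_str = llama_token_to_piece(ctx, id, params.special);
    fprintf(stdout, "%s", token_str.c_str());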
---
 common/common.cpp      |  6 +++---
 common/common.h        |  2 +-
 examples/main/main.cpp | 10 ++--------
 3 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 781f2166b..65103c3c2 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -904,8 +904,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.interactive_specials = true;
         return true;
     }
-    if (arg == "--no-special") {
-        params.no_special = true;
+    if (arg == "--special") {
+        params.special = true;
         return true;
     }
     if (arg == "--embedding") {
@@ -1366,9 +1366,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     printf("  -h, --help            show this help message and exit\n");
     printf("  --version             show version and build info\n");
     printf("  -i, --interactive     run in interactive mode\n");
+    printf("  --special             special tokens output enabled\n");
     printf("  --interactive-specials allow special tokens in user text, in interactive mode\n");
     printf("  --interactive-first   run in interactive mode and wait for input right away\n");
-    printf("  --no-special          control tokens output disabled\n");
     printf("  -cnv, --conversation  run in conversation mode (does not print special tokens and suffix/prefix)\n");
     printf("  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     printf("  -cml, --chatml        run in chatml mode (use with ChatML-compatible models)\n");
diff --git a/common/common.h b/common/common.h
index 5388f6b68..264504830 100644
--- a/common/common.h
+++ b/common/common.h
@@ -146,7 +146,7 @@ struct gpt_params {
     bool use_color         = false; // use color to distinguish generations and inputs
     bool interactive       = false; // interactive mode
     bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
-    bool no_special        = false; // disable control token output
+    bool special           = false; // enable special token output
     bool conversation      = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml            = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all  = false; // save user input and generations to prompt cache
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index ac35772f1..44949ba86 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -740,16 +740,10 @@ int main(int argc, char ** argv) {
         // display text
         if (input_echo && display) {
             for (auto id : embd) {
-                const std::string token_str = llama_token_to_piece(ctx, id);
+                const std::string token_str = llama_token_to_piece(ctx, id, params.special);
 
                 // Console/Stream Output
-                if (!llama_token_is_control(llama_get_model(ctx), id)) {
-                    // Stream Output Token To Standard Output
-                    fprintf(stdout, "%s", token_str.c_str());
-                } else if (!params.no_special && !params.conversation) {
-                    // Stream Control Token To Standard Output Stream
-                    fprintf(stdout, "%s", token_str.c_str());
-                }
+                fprintf(stdout, "%s", token_str.c_str());
 
                 // Record Displayed Tokens To Log
                 // Note: Generated tokens are created one by one hence this check