From 6183d9f7a5da0083571e688be9e87ac184135b68 Mon Sep 17 00:00:00 2001
From: Bailey Chittle
Date: Thu, 7 Mar 2024 16:21:07 -0500
Subject: [PATCH] override simple, for my tests and use case

---
 examples/simple/simple.cpp | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index 39e2d8ea490e3..e2fe3a2bb992e 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -23,11 +23,14 @@ int main(int argc, char ** argv) {
     }
 
     if (params.prompt.empty()) {
-        params.prompt = "Hello my name is";
+        params.prompt = R"(user
+Hello!
+model
+)";
     }
 
     // total length of the sequence including the prompt
-    const int n_len = 32;
+    const int n_len = 128;
 
     // init LLM
 
@@ -85,10 +88,10 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "\n");
 
     for (auto id : tokens_list) {
-        fprintf(stderr, "%s", llama_token_to_piece(ctx, id).c_str());
+        fprintf(stdout, "%s", llama_token_to_piece(ctx, id).c_str());
     }
 
-    fflush(stderr);
+    fflush(stdout);
 
     // create a llama_batch with size 512
     // we use this object to submit token data for decoding
@@ -140,7 +143,8 @@ int main(int argc, char ** argv) {
             break;
         }
 
-        LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
+        // LOG_TEE("%s", llama_token_to_piece(ctx, new_token_id).c_str());
+        fprintf(stdout, "%s", llama_token_to_piece(ctx, new_token_id).c_str());
         fflush(stdout);
 
         // prepare the next batch