common : revert showing control tokens by default for server (#6860)

* fix: revert showing control tokens by default

* feat: revert changes to the default behavior of llama_token_to_piece; provide an overloaded declaration that takes a "bool special" param to toggle showing control tokens

* feat: use the overloaded llama_token_to_piece from common/common.cpp and pass "false" so that control tokens are not shown in chat completion responses

* common : simplify

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Kyle Mistele 2024-04-24 05:15:29 -05:00 committed by GitHub
parent 28103f4832
commit 37246b1031
3 changed files with 7 additions and 6 deletions
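For context, a minimal sketch (not part of the commit) of how a caller might use the new overload from common/common.h; render_token and for_chat are hypothetical names, and an already-initialized llama_context is assumed:

#include <string>
#include "common.h" // declares the std::string llama_token_to_piece overload

// Render one sampled token as text. Passing special = false suppresses
// control tokens (e.g. chat template markers), which is what the server
// now does for chat completion responses; special = true renders them.
static std::string render_token(const llama_context * ctx, llama_token tok, bool for_chat) {
    return llama_token_to_piece(ctx, tok, /* special = */ !for_chat);
}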

common/common.cpp

@@ -2328,12 +2328,12 @@ std::vector<llama_token> llama_tokenize(
     return result;
 }
 
-std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
+std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
     std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), true);
+    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), true);
+        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
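The wrapper keeps the existing two-pass convention: it first calls the C-style llama_token_to_piece with a small 8-byte buffer; a negative return value encodes the required size, so the buffer is resized to -n_tokens and the call is retried with the same special flag.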

common/common.h

@@ -237,11 +237,12 @@ std::vector<llama_token> llama_tokenize(
         bool   add_special,
         bool   parse_special = false);
 
-// tokenizes a token into a piece
+// tokenizes a token into a piece, optionally renders special/control tokens
 // should work similar to Python's `tokenizer.id_to_piece`
 std::string llama_token_to_piece(
         const struct llama_context * ctx,
-        llama_token   token);
+        llama_token   token,
+        bool          special = true);
 
 // TODO: these should be moved in llama.h C-style API under single `llama_detokenize` function
 // that takes into account the tokenizer type and decides how to handle the leading space
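Note that the new parameter defaults to true, so existing call sites that omit it keep rendering special tokens as before; the server's process_token below is the only caller that passes false.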

examples/server/server.cpp

@@ -1117,7 +1117,7 @@ struct server_context {
     bool process_token(completion_token_output & result, server_slot & slot) {
         // remember which tokens were sampled - used for repetition penalties during sampling
-        const std::string token_str = llama_token_to_piece(ctx, result.tok);
+        const std::string token_str = llama_token_to_piece(ctx, result.tok, false);
         slot.sampled = result.tok;
 
         // search stop word and delete it