diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 4969d2628c131..9a680aed4dcfd 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -439,7 +439,7 @@ struct llm_tokenizer_bpe_session {
                 "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
                 "Are you sure this is what you want?\n", __FUNCTION__);
         }
-        if (vocab.get_add_bos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
+        if (vocab.get_add_eos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
             LLAMA_LOG_WARN(
                 "%s: Added a EOS token to the prompt as specified by the model but the prompt "
                 "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "
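
Note: the change swaps the guard on the duplicate-EOS warning from the add-BOS flag to the add-EOS flag. With the old condition, a model that appends EOS but not BOS (add_bos = false, add_eos = true) would silently produce a prompt ending in two EOS tokens without any warning. Below is a minimal standalone sketch of that false negative; it uses stand-in names (add_bos, add_eos, eos_id, tokens), not the actual llama.cpp vocab API.

// Minimal sketch (illustrative, not llama.cpp code) of why the old guard
// misfires. add_bos/add_eos/eos_id stand in for vocab.get_add_bos(),
// vocab.get_add_eos(), and vocab.token_eos().
#include <cstdio>
#include <vector>

int main() {
    const int eos_id = 2;                             // hypothetical EOS token id
    std::vector<int> tokens = {5, 6, eos_id, eos_id}; // prompt ended with EOS, then EOS was appended again

    bool add_bos = false; // model adds no BOS ...
    bool add_eos = true;  // ... but does append EOS

    // Old condition: gated on the BOS flag, so the duplicate EOS is never reported here.
    bool old_warn = add_bos && tokens.size() >= 2 && *(tokens.end() - 2) == eos_id;
    // Fixed condition: gated on the EOS flag, so the duplicate EOS is caught.
    bool new_warn = add_eos && tokens.size() >= 2 && *(tokens.end() - 2) == eos_id;

    std::printf("old condition warns: %s\n", old_warn ? "yes" : "no"); // prints "no"  (false negative)
    std::printf("new condition warns: %s\n", new_warn ? "yes" : "no"); // prints "yes"
    return 0;
}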