Commit f1f5e82

examples : fix is_first logic for tokenization (#14329)
ggml-ci
1 parent af3373f commit f1f5e82

2 files changed: +2 −2 lines changed

examples/simple-chat/simple-chat.cpp

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ int main(int argc, char ** argv) {
     auto generate = [&](const std::string & prompt) {
         std::string response;
 
-        const bool is_first = llama_memory_seq_pos_max(llama_get_memory(ctx), 0) == 0;
+        const bool is_first = llama_memory_seq_pos_max(llama_get_memory(ctx), 0) == -1;
 
         // tokenize the prompt
         const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);

tools/run/run.cpp

Lines changed: 1 addition & 1 deletion
@@ -939,7 +939,7 @@ static int apply_chat_template(const struct common_chat_templates * tmpls, Llama
 // Function to tokenize the prompt
 static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                            std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
-    const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == 0;
+    const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == -1;
 
     const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
     prompt_tokens.resize(n_prompt_tokens);
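
Both files apply the same one-line fix: llama_memory_seq_pos_max() reports -1 for a sequence that holds no tokens, so comparing against 0 confuses "empty" with "exactly one token stored at position 0" and fails to flag a truly empty context as first. Below is a minimal sketch, not taken from the commit, of the corrected tokenization pattern; the helper name tokenize_prompt_sketch is hypothetical, and only the llama.cpp calls already visible in the diff (llama_get_memory, llama_memory_seq_pos_max, llama_tokenize) are assumed.

// Minimal sketch (not from the commit) of the corrected pattern, assuming the
// llama.cpp API used in the diff; the helper name is hypothetical.
#include <string>
#include <vector>

#include "llama.h"

static std::vector<llama_token> tokenize_prompt_sketch(llama_context * ctx,
                                                       const llama_vocab * vocab,
                                                       const std::string & prompt) {
    // An empty sequence has no positions, so its max position is -1; checking
    // against 0 would confuse "empty" with "one token stored at position 0".
    const bool is_first = llama_memory_seq_pos_max(llama_get_memory(ctx), 0) == -1;

    // First call with a NULL buffer: the negated return value is the required
    // token count (llama_tokenize returns a negative count when the buffer is too small).
    const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                                                NULL, 0, is_first, true);

    // Second call fills the buffer; passing is_first as add_special means
    // special tokens such as BOS are only prepended for the first prompt.
    std::vector<llama_token> prompt_tokens(n_prompt_tokens);
    llama_tokenize(vocab, prompt.c_str(), prompt.size(),
                   prompt_tokens.data(), prompt_tokens.size(), is_first, true);

    return prompt_tokens;
}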
