
Commit 62af464

batch : fix check for empty sequences in memory (#14364)
* batch : fix check for empty sequences in memory (ggml-ci)
* cont : reuse the var (ggml-ci)
1 parent c148cf1 commit 62af464

File tree

1 file changed: +6 −4 lines


src/llama-batch.cpp

Lines changed: 6 additions & 4 deletions
@@ -244,19 +244,21 @@ bool llama_batch_allocr::init(
             continue;
         }
 
-        if (memory) {
+        const llama_pos p0 = memory ? memory->seq_pos_max(s) : -1;
+
+        if (p0 >= 0) {
             bool ok = true;
 
             if (batch.token) {
-                if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                if (seq_pos_min(s) != p0 + 1) {
                     ok = false;
                 }
             } else {
                 assert(batch.embd);
 
                 // for embeddings (typically used as vision input), we allow them to have repeating positions
                 // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
-                if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                if (seq_pos_min(s) != p0 && seq_pos_min(s) != p0 + 1) {
                     ok = false;
                 }
             }
@@ -267,7 +269,7 @@ bool llama_batch_allocr::init(
                     " - the last position stored in the memory module of the context (i.e. the KV cache) for sequence %d is X = %d\n"
                     " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n"
                     " it is required that the sequence positions remain consecutive: Y = X + 1\n",
-                    __func__, s, s, memory->seq_pos_max(s), s, seq_pos_min(s));
+                    __func__, s, s, p0, s, seq_pos_min(s));
 
                 return false;
             }
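In short, the patch caches the result of memory->seq_pos_max(s) in a local p0 and replaces the old if (memory) guard with if (p0 >= 0), so the position-consecutiveness check is skipped for sequences that have no entries in memory yet. Under the old guard, an empty sequence would still be validated against the sentinel value (assuming seq_pos_max reports -1 for an empty sequence, as the memory ? ... : -1 fallback in the patch suggests), which effectively forced the batch to start at position 0. Below is a minimal, self-contained sketch of the patched logic; toy_memory, check_seq, and batch_pos_min are hypothetical stand-ins for the real llama.cpp types and helpers, not the actual implementation:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

using llama_pos = int32_t;

// Toy stand-in for the memory module: seq_pos_max(s) yields -1 when
// sequence s has no entries, matching the sentinel the patch checks for.
struct toy_memory {
    std::vector<llama_pos> max_pos; // last stored position per sequence, -1 if empty

    llama_pos seq_pos_max(int s) const { return max_pos[s]; }
};

// Sketch of the patched check for one token sequence s; batch_pos_min
// plays the role of seq_pos_min(s) in the real code.
bool check_seq(const toy_memory * memory, int s, llama_pos batch_pos_min) {
    // cache the lookup once and reuse it below ("cont : reuse the var")
    const llama_pos p0 = memory ? memory->seq_pos_max(s) : -1;

    // p0 < 0: the sequence is empty in memory, so there is no previous
    // position to be consecutive with - skip the check entirely
    if (p0 >= 0) {
        if (batch_pos_min != p0 + 1) {
            fprintf(stderr, "seq %d: expected start %d, got %d\n", s, p0 + 1, batch_pos_min);
            return false;
        }
    }

    return true;
}

int main() {
    toy_memory mem{{-1, 4}}; // seq 0 is empty, seq 1 ends at position 4

    assert( check_seq(&mem, 0, 3)); // empty sequence: check is skipped
    assert( check_seq(&mem, 1, 5)); // consecutive: 5 == 4 + 1
    assert(!check_seq(&mem, 1, 9)); // gap: rejected

    return 0;
}

Reusing the cached p0 in the later comparisons and in the error log also guarantees the guard and the messages agree on the same value, instead of calling seq_pos_max(s) repeatedly.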
