Commit 2e90b1d

adapt mtp with graph mode in v1
Signed-off-by: whx-sjtu <[email protected]>
1 parent 908a851 commit 2e90b1d

3 files changed: +54 -12 lines changed

vllm_ascend/attention/mla_v1.py

Lines changed: 39 additions & 7 deletions
@@ -8,6 +8,7 @@
                                               AttentionMetadata,
                                               MLAAttentionImpl)
 from vllm.attention.backends.utils import PAD_SLOT_ID
+from vllm.config import get_current_vllm_config
 from vllm.model_executor.layers.linear import (LinearBase,
                                                UnquantizedLinearMethod)
 
@@ -83,6 +84,7 @@ class AscendMLADecodeMetadata:
     seq_lens: torch.Tensor
     max_seq_lens: int
     seq_lens_list: list[int]
+    attn_mask: torch.Tensor
 
 
 @dataclass
@@ -170,11 +172,13 @@ def reorder_batch(self, input_batch: "InputBatch",
 
         for i, req_id in enumerate(input_batch.req_ids):
             num_tokens = scheduler_output.num_scheduled_tokens[req_id]
+            num_spec_tokens = len(
+                scheduler_output.scheduled_spec_decode_tokens.get(req_id, []))
             # for now treat 1 scheduled token as "decode" even if its not,
             # we should update this to something like < 8 in the future but
             # currently the TritonMLA._forward_decode only supports
             # num_tokens = 1
-            if num_tokens == 1:
+            if num_tokens - num_spec_tokens == 1:
                 decodes.append(i)
                 num_decode_tokens += num_tokens
             else:
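
Note (not part of the diff): with MTP speculative decoding a decode request is scheduled with its target token plus its draft tokens, so the "is this a decode?" check has to subtract the spec tokens. A minimal illustrative sketch of the adjusted test; `is_decode` is a hypothetical helper, not code from the runner:

    # Illustrative only: a decode request carrying 2 draft tokens is
    # scheduled with 3 tokens, so "num_tokens == 1" would misclassify it.
    def is_decode(num_scheduled: int, num_spec: int) -> bool:
        return num_scheduled - num_spec == 1

    assert is_decode(1, 0)      # plain decode, no draft tokens
    assert is_decode(3, 2)      # decode verifying 2 draft tokens
    assert not is_decode(8, 0)  # prefill / chunked prefill
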
@@ -317,7 +321,7 @@ def build(
             seq_lens = seq_lens_cpu
             max_query_len = query_lens.max().item()
             max_seq_lens = seq_lens.max().item()
-            query_start_loc = None
+            query_start_loc = common_attn_metadata.query_start_loc
 
         prefill_metadata = None
         if self._num_prefills > 0:
@@ -382,7 +386,8 @@ def build(
                 block_table=block_table,
                 seq_lens=seq_lens,
                 seq_lens_list=seq_lens.tolist(),
-                max_seq_lens=max_seq_lens)
+                max_seq_lens=max_seq_lens,
+                attn_mask=self.runner.spec_attn_mask)
 
         return self.metadata_cls(  # type: ignore
             num_actual_tokens=num_actual_tokens,
@@ -445,6 +450,17 @@ def __init__(
 
         ascend_config = get_ascend_config()
         self.torchair_graph_enabled = ascend_config.torchair_graph_config.enabled
+        # Adapt torch air graph mode with spec decoding.
+        speculative_config = get_current_vllm_config().speculative_config
+        self.fia_sparse_mode = 0
+        self.use_spec_decode = False
+        # We need to set the sparse_mode of fused_infer_attention op to 3
+        # in spec decoding scenario in order to pass in attention mask.
+        if speculative_config is not None:
+            self.fia_sparse_mode = 3
+            self.use_spec_decode = True
+            self.spec_token_num = speculative_config.num_speculative_tokens
+            assert self.spec_token_num > 0
 
     def _v_up_proj_and_o_proj(self, x):
         # Convert from (B, N, L) to (N, B, L)
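
Note (not part of the diff): the comment above is the crux of this hunk; when a speculative config is present, the fused-infer-attention op must run with sparse_mode 3 so the caller-supplied attention mask is honored. A rough sketch of that selection, with a hypothetical `SpecConfig` standing in for vLLM's speculative config:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class SpecConfig:               # stand-in for vLLM's SpeculativeConfig
        num_speculative_tokens: int

    def pick_fia_sparse_mode(spec: Optional[SpecConfig]) -> int:
        # 0: plain single-token decode, no explicit mask needed
        # 3: spec-decode verification, an attention mask is passed in
        return 3 if spec is not None else 0

    assert pick_fia_sparse_mode(None) == 0
    assert pick_fia_sparse_mode(SpecConfig(num_speculative_tokens=1)) == 3
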
@@ -646,9 +662,24 @@ def _forward_decode(
                               dtype=q.dtype,
                               device=q.device)
         if self.running_in_graph:
-            # TorchAir's shape is [bs, num_heads_per_rank, seq_len, dim]
-            q_nope = q_nope.view(num_tokens, self.num_heads, 1, -1)
-            q_pe = q_pe.view(num_tokens, self.num_heads, 1, -1)
+            # TorchAir's shape is [bs, num_heads_per_rank, q_seq_len, dim]
+            if self.use_spec_decode:
+                assert num_tokens % self.spec_token_num == 0
+                q_nope = (q_nope.view(
+                    num_tokens // (self.spec_token_num + 1),
+                    self.spec_token_num + 1,
+                    self.num_heads,
+                    -1,
+                ).transpose(1, 2).contiguous())
+                q_pe = (q_pe.view(
+                    num_tokens // (self.spec_token_num + 1),
+                    self.spec_token_num + 1,
+                    self.num_heads,
+                    -1,
+                ).transpose(1, 2).contiguous())
+            else:
+                q_nope = q_nope.view(num_tokens, self.num_heads, 1, -1)
+                q_pe = q_pe.view(num_tokens, self.num_heads, 1, -1)
             # shape of knope/k_pe for npu graph mode should be:
             # [num_blocks, num_kv_heads, block_size, self.kv_lora_rank/self.qk_rope_head_dim]
             block_size = kv_c_and_k_pe_cache[0].shape[1]
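
Note (not part of the diff): the new branch reshapes the flat decode queries of shape [num_tokens, num_heads, dim] into the BNSD layout TorchAir expects, where the q_seq_len axis holds the target token plus the draft tokens of each request. A shape-only sketch with made-up sizes:

    import torch

    num_heads, head_dim, spec_token_num = 4, 16, 1
    batch = 3
    num_tokens = batch * (spec_token_num + 1)        # 6 flat query tokens

    q = torch.randn(num_tokens, num_heads, head_dim)
    # [num_tokens, H, D] -> [B, S, H, D] -> [B, H, S, D]  (BNSD)
    q_bnsd = (q.view(batch, spec_token_num + 1, num_heads, head_dim)
               .transpose(1, 2).contiguous())
    assert q_bnsd.shape == (batch, num_heads, spec_token_num + 1, head_dim)
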
@@ -666,7 +697,8 @@
                 num_heads=self.num_heads,
                 num_key_value_heads=self.num_kv_heads,
                 input_layout="BNSD",
-                atten_mask=attn_metadata.attn_mask,
+                atten_mask=attn_metadata.decode.attn_mask,  # type:ignore
+                sparse_mode=self.fia_sparse_mode,
                 scale=self.scale,
                 antiquant_mode=0,
                 antiquant_scale=None,

vllm_ascend/worker/model_runner_v1.py

Lines changed: 12 additions & 4 deletions
@@ -196,8 +196,13 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device):
 
         # Set up speculative decoding.
         self.use_spec_decode = False
+        self.spec_attn_mask = None
         if self.speculative_config:
             self.use_spec_decode = True
+            self.spec_attn_mask = torch.triu(torch.ones(2048,
+                                                        2048,
+                                                        dtype=torch.bool),
+                                             diagonal=1).to("npu")
             if get_pp_group().is_last_rank:
                 if self.speculative_config.method == "ngram":
                     self.drafter = NgramProposer(self.vllm_config)
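
Note (not part of the diff): the runner builds one reusable upper-triangular boolean mask at init time; the MLA decode path slices into it when verifying draft tokens. The same construction on CPU (the commit moves the tensor to "npu"):

    import torch

    # True above the diagonal means "may not attend": row i sees tokens 0..i.
    spec_attn_mask = torch.triu(torch.ones(2048, 2048, dtype=torch.bool),
                                diagonal=1)
    print(spec_attn_mask[:3, :3])
    # tensor([[False,  True,  True],
    #         [False, False,  True],
    #         [False, False, False]])
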
@@ -564,10 +569,13 @@ def _process_reqs(
         # Get the number of scheduled tokens for each request.
         # TODO: The Python loop can be slow. Optimize.
         num_scheduled_tokens = np.empty(num_reqs, dtype=np.int32)
+        num_valid_tokens = np.empty(num_reqs, dtype=np.int32)
         max_num_scheduled_tokens = 0
         for i, req_id in enumerate(self.input_batch.req_ids):
             num_tokens = scheduler_output.num_scheduled_tokens[req_id]
             num_scheduled_tokens[i] = num_tokens
+            num_valid_tokens[i] = num_tokens - \
+                len(scheduler_output.scheduled_spec_decode_tokens.get(req_id, []))
             max_num_scheduled_tokens = max(max_num_scheduled_tokens,
                                            num_tokens)
 
@@ -615,7 +623,7 @@ def _process_reqs(
         if np.array_equal(self.seq_lens_np[:num_reqs], num_scheduled_tokens):
             attn_state = AscendAttentionState.PrefillNoCache
         # We assume it is the decode stage, where prefill occurs but only one token is not hit in cache.
-        elif np.all(num_scheduled_tokens == 1):
+        elif np.all(num_valid_tokens == 1):
             attn_state = AscendAttentionState.DecodeOnly
         # splitfuse
         elif not ascend_config.ascend_scheduler_config.enabled or self.chunked_prefill_enabled:
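
Note (not part of the diff): the two hunks above swap the `num_scheduled_tokens == 1` test for `num_valid_tokens == 1`, where valid tokens exclude the scheduled draft tokens; otherwise a decode batch that carries drafts would never be classified as DecodeOnly. A small sketch with assumed per-request counts:

    import numpy as np

    num_scheduled_tokens = np.array([3, 3, 3], dtype=np.int32)  # 1 target + 2 drafts each
    num_spec_tokens = np.array([2, 2, 2], dtype=np.int32)
    num_valid_tokens = num_scheduled_tokens - num_spec_tokens

    assert not np.all(num_scheduled_tokens == 1)  # old test: misses DecodeOnly
    assert np.all(num_valid_tokens == 1)          # new test: still a pure decode batch
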
@@ -657,14 +665,14 @@ def _process_reqs(
         # Add graph_pad_size here
         if envs_ascend.VLLM_ENABLE_MC2 or (self.torchair_graph_enabled
                                            and not with_prefill):
-            batch_size = len(seq_lens)
             if self.dp_size > 1:
                 padded_batch_size = self.select_torchair_padded_batch_size(
                     max_num_tokens)
             else:
                 padded_batch_size = self.select_torchair_padded_batch_size(
-                    batch_size)
-            graph_pad_size = padded_batch_size - batch_size
+                    total_num_scheduled_tokens)
+            graph_pad_size = padded_batch_size - total_num_scheduled_tokens
+
             extra_builder_kwargs['graph_pad_size'] = graph_pad_size
 
         if self.vllm_config.model_config.use_mla:
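
Note (not part of the diff): with draft tokens in the batch, the number of scheduled tokens no longer equals the number of sequences, so the graph padding is now derived from `total_num_scheduled_tokens` instead of `len(seq_lens)`. A sketch of the arithmetic, with a hypothetical `padded_sizes` table in place of `select_torchair_padded_batch_size`:

    import bisect

    padded_sizes = [4, 8, 16, 32, 64]      # hypothetical captured graph sizes

    def select_padded_size(n: int) -> int:
        return padded_sizes[bisect.bisect_left(padded_sizes, n)]

    batch, spec_token_num = 5, 1
    total_num_scheduled_tokens = batch * (spec_token_num + 1)  # 10 tokens, not 5
    padded = select_padded_size(total_num_scheduled_tokens)    # -> 16
    graph_pad_size = padded - total_num_scheduled_tokens       # -> 6 padded slots
    assert (padded, graph_pad_size) == (16, 6)
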

vllm_ascend/worker/mtp_proposer_v1.py

Lines changed: 3 additions & 1 deletion
@@ -4,7 +4,7 @@
                          set_current_vllm_config)
 from vllm.forward_context import set_forward_context
 from vllm.model_executor.model_loader import get_model_loader
-from vllm.model_executor.model_loader.utils import set_default_torch_dtype
+from vllm.model_executor.model_loader.utils import set_default_torch_dtype, process_weights_after_loading
 from vllm.v1.sample.metadata import SamplingMetadata
 
 from vllm_ascend.attention.mla_v1 import CommonAttentionMetadata
@@ -199,6 +199,8 @@ def load_model(self) -> None:
             loader.get_all_weights(
                 self.vllm_config.speculative_config.draft_model_config,
                 self.model))
+        process_weights_after_loading(self.model, draft_model_config,
+                                      target_device)
 
 
 # TODO Using torch instead of triton may result in poor performance
