
Commit ebb2a70

static EPLB fix bug, add unit test (#1186)
### What this PR does / why we need it?

1. Add a unit test for the static EPLB `ExpertLoadBalancer`.
2. Fix a bug: a `torch.Tensor` cannot be used directly as the condition of an `if` statement (truth-value checks on multi-element tensors raise an error), so the `log2phy` checks now use `is not None`. A short illustration follows the sign-off below.

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

Run the unit test.

Signed-off-by: songshanhu07 <[email protected]>
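For context on item 2: PyTorch only defines truthiness for single-element tensors, so guarding an optional tensor with a bare `if` raises at runtime once the tensor holds more than one value. A minimal sketch with a generic tensor (not the PR's actual call sites):

import torch

log2phy = torch.tensor([2, 6, 1, 3, 7, 4, 5, 0])

# Buggy pattern: raises "Boolean value of Tensor with more than one element is ambiguous".
try:
    if log2phy:
        pass
except RuntimeError as err:
    print(err)

# Fixed pattern: only checks whether the optional tensor was provided at all.
if log2phy is not None:
    print("remap topk ids through log2phy")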
1 parent 2cd8ecd commit ebb2a70

File tree

4 files changed: +150, -3 lines

docs/requirements-test.txt

Lines changed: 1 addition & 1 deletion

@@ -1,2 +1,2 @@
 pytest-asyncio
-
+pytest-mock
requirements-dev.txt

Lines changed: 1 addition & 0 deletions

@@ -4,6 +4,7 @@ modelscope
 openai
 pytest >= 6.0
 pytest-asyncio
+pytest-mock
 lm-eval
 ray
 types-jsonschema
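pytest-mock is added because the new unit test stubs NPU-only calls through its `mocker` fixture. A minimal, self-contained sketch of that fixture; the patched target here (`os.getcwd`) is just a stand-in, not something this PR touches:

import os

def test_mocker_fixture(mocker):
    # mocker.patch behaves like unittest.mock.patch but is undone
    # automatically when the test finishes.
    mocker.patch("os.getcwd", return_value="/tmp")
    assert os.getcwd() == "/tmp"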
New file: unit test for the static EPLB ExpertLoadBalancer

Lines changed: 146 additions & 0 deletions

# fused moe ops test will hit the infer_schema error, we need to add the patch
# here to make the test pass.
import vllm_ascend.patch.worker.patch_common.patch_utils  # type: ignore[import]  # isort: skip  # noqa

import json
from typing import List, TypedDict

import pytest
import torch

from vllm_ascend.ops.expert_load_balancer import ExpertLoadBalancer


class Device(TypedDict):
    device_id: int
    device_expert: List[int]


class Layer(TypedDict):
    layer_id: int
    device_count: int
    device_list: List[Device]


class MockData(TypedDict):
    moe_layer_count: int
    layer_list: List[Layer]


MOCK_DATA: MockData = {
    "moe_layer_count": 1,
    "layer_list": [{
        "layer_id": 0,
        "device_count": 2,
        "device_list": [{
            "device_id": 0,
            "device_expert": [7, 2, 0, 3, 5]
        }, {
            "device_id": 1,
            "device_expert": [6, 1, 4, 7, 2]
        }]
    }]
}


@pytest.fixture
def mock_expert_load_balancer(tmp_path):
    json_file = tmp_path / "expert_map.json"
    with open(json_file, 'w') as f:
        json.dump(MOCK_DATA, f)

    return ExpertLoadBalancer(str(json_file), global_expert_num=8)


def test_init(mock_expert_load_balancer):
    assert isinstance(mock_expert_load_balancer.expert_map_tensor,
                      torch.Tensor)
    assert mock_expert_load_balancer.layers_num == MOCK_DATA["moe_layer_count"]
    assert mock_expert_load_balancer.ranks_num == MOCK_DATA["layer_list"][0][
        "device_count"]


def test_generate_index_dicts(mock_expert_load_balancer):
    tensor_2d = torch.tensor([[7, 2, 0, 3, 5], [6, 1, 4, 7, 2]])
    result = mock_expert_load_balancer.generate_index_dicts(tensor_2d)
    expected_result = [{7: 0, 2: 1, 0: 2, 3: 3, 5: 4},
                       {6: 5, 1: 6, 4: 7, 7: 8, 2: 9}]
    assert result == expected_result


def test_generate_expert_placement_map(mock_expert_load_balancer):
    expert_placement_map = mock_expert_load_balancer.generate_expert_placement_map()
    assert expert_placement_map.shape == (mock_expert_load_balancer.layers_num,
                                          mock_expert_load_balancer.ranks_num,
                                          8)
    assert torch.all(expert_placement_map >= -1)


def test_generate_log2phy_expert_map(mock_expert_load_balancer):
    layer_id = 0
    log2phy_map = mock_expert_load_balancer.generate_log2phy_expert_map(layer_id)
    assert log2phy_map.shape == (mock_expert_load_balancer.ranks_num, 8)
    assert torch.all(log2phy_map >= -1)


def test_get_rank_placement_map(mock_expert_load_balancer, mocker):
    mocker.patch("torch_npu.npu._lazy_init")
    mocker.patch("torch.npu.current_device", return_value="cpu")
    layer_id = 0
    rank_id = 0
    rank_local_expert_num, rank_expert_map = mock_expert_load_balancer.get_rank_placement_map(
        layer_id, rank_id)
    assert rank_local_expert_num == 5
    expected_tensor = torch.tensor([2, -1, 1, 3, -1, 4, -1, 0],
                                   dtype=torch.int32).to(rank_expert_map.device)
    assert rank_expert_map.equal(expected_tensor)

    rank_id = 1
    rank_local_expert_num, rank_expert_map = mock_expert_load_balancer.get_rank_placement_map(
        layer_id, rank_id)
    expected_tensor = torch.tensor([-1, 1, 4, -1, 2, -1, 0, 3],
                                   dtype=torch.int32).to(rank_expert_map.device)
    assert rank_expert_map.equal(expected_tensor)


def test_get_rank_log2phy_map(mock_expert_load_balancer):
    layer_id = 0
    rank_id = 0
    log2phy_map = mock_expert_load_balancer.get_rank_log2phy_map(
        layer_id, rank_id)
    expected_tensor = torch.tensor([2, 6, 1, 3, 7, 4, 5, 0],
                                   dtype=torch.int32).to(log2phy_map.device)
    assert log2phy_map.equal(expected_tensor)

    rank_id = 1
    log2phy_map = mock_expert_load_balancer.get_rank_log2phy_map(
        layer_id, rank_id)
    expected_tensor = torch.tensor([2, 6, 9, 3, 7, 4, 5, 8],
                                   dtype=torch.int32).to(log2phy_map.device)
    assert log2phy_map.equal(expected_tensor)


def test_get_global_redundant_expert_num(mock_expert_load_balancer):
    redundant_expert_num = mock_expert_load_balancer.get_global_redundant_expert_num()
    expected_redundant_expert_num = (
        len(MOCK_DATA["layer_list"][0]["device_list"][0]["device_expert"]) *
        MOCK_DATA["layer_list"][0]["device_count"] - 8)
    assert redundant_expert_num == expected_redundant_expert_num
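As a sanity check on the expected values above, the rank-0 tensors can be derived by hand from MOCK_DATA. A short sketch (not part of the PR) that assumes the physical expert slots are laid out rank-major, rank 0 owning slots 0-4 and rank 1 owning slots 5-9:

# Derive the rank-0 placement map and the redundant-expert count from MOCK_DATA.
device_experts = [[7, 2, 0, 3, 5], [6, 1, 4, 7, 2]]  # physical slot -> logical expert, per rank
global_expert_num = 8

# Rank-0 placement map: logical expert id -> local slot on rank 0, -1 if not hosted there.
placement_rank0 = [-1] * global_expert_num
for slot, expert in enumerate(device_experts[0]):
    placement_rank0[expert] = slot
print(placement_rank0)  # [2, -1, 1, 3, -1, 4, -1, 0], as asserted in test_get_rank_placement_map

# Two ranks host 5 physical experts each for 8 logical experts, so 2 copies are redundant.
print(len(device_experts[0]) * len(device_experts) - global_expert_num)  # 2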

vllm_ascend/quantization/w8a8_dynamic.py

Lines changed: 2 additions & 2 deletions

@@ -118,7 +118,7 @@ def fused_experts_with_mc2(
     global_redundant_expert_num: int = 0,
     shared_experts: Optional[Any] = None,
 ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
-    if log2phy:
+    if log2phy is not None:
         topk_ids = log2phy[topk_ids]
     global_bs = 0
     moe_expert_num = len(expert_map) + global_redundant_expert_num
@@ -233,7 +233,7 @@ def fused_experts_with_all2all(
     log2phy: torch.Tensor = None,
     global_redundant_expert_num: int = 0,
 ):
-    if log2phy:
+    if log2phy is not None:
         topk_ids = log2phy[topk_ids]
     original_shape = hidden_states.shape
     if len(original_shape) == 3:
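For readers unfamiliar with the remapping on the fixed branch: `log2phy[topk_ids]` uses advanced indexing to translate the router's logical expert ids into physical slot ids. A hedged illustration using the rank-0 map from the new unit test (the real call sites pass tensors built at runtime):

import torch

log2phy = torch.tensor([2, 6, 1, 3, 7, 4, 5, 0], dtype=torch.int32)  # logical expert id -> physical slot
topk_ids = torch.tensor([[7, 2], [0, 5]])  # top-k logical experts chosen per token
print(log2phy[topk_ids])  # tensor([[0, 1], [2, 4]], dtype=torch.int32)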
