Skip to content

Commit 49d59f4

Browse files
committed
enable sleep mode test
Signed-off-by: wangli <[email protected]>
1 parent eb2701e commit 49d59f4

File tree

2 files changed

+9
-2
lines changed

2 files changed

+9
-2
lines changed

.github/workflows/vllm_ascend_test.yaml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,9 @@ jobs:
127127
pytest -sv tests/singlecard/test_scheduler.py
128128
# guided decoding doesn't work, fix it later
129129
# pytest -sv tests/singlecard/test_guided_decoding.py.py
130-
pytest -sv tests/singlecard/ --ignore=tests/singlecard/test_offline_inference.py --ignore=tests/singlecard/test_scheduler.py --ignore=tests/singlecard/test_guided_decoding.py
130+
pytest -sv tests/singlecard/test_camem.py
131+
pytest -sv tests/singlecard/test_ilama_lora.py
132+
pytest -sv tests/singlecard/test_pyhccl.py
131133
else
132134
pytest -sv tests/multicard/test_ilama_lora_tp2.py
133135
# Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/test_offline_inference_distributed.py will raise error.

tests/singlecard/test_camem.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@
1616
# See the License for the specific language governing permissions and
1717
# limitations under the License.
1818
#
19+
import os
20+
1921
import pytest
2022
import torch
2123
from vllm import LLM, SamplingParams
@@ -24,7 +26,11 @@
2426
from tests.utils import fork_new_process_for_each_test
2527
from vllm_ascend.device_allocator.camem import CaMemAllocator
2628

29+
if os.getenv("VLLM_USE_V1") == "1":
30+
pytest.skip("Skip in vllm v1", allow_module_level=True)
31+
2732

33+
@fork_new_process_for_each_test
2834
def test_basic_camem():
2935
# some tensors from default memory pool
3036
shape = (1024, 1024)
@@ -57,7 +63,6 @@ def test_basic_camem():
5763
assert torch.allclose(output, torch.ones_like(output) * 3)
5864

5965

60-
@pytest.mark.skipif(True, reason="test failed, should be fixed later")
6166
@fork_new_process_for_each_test
6267
def test_end_to_end():
6368
free, total = torch.npu.mem_get_info()

0 commit comments

Comments
 (0)