Skip to content

Commit 874cac8

Browse files
committed
fix lint
1 parent 95b4c19 commit 874cac8

6 files changed

Lines changed: 15 additions & 9 deletions

File tree

autotest/interface/pipeline/test_pipeline_sleep_wakeup.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,6 @@
77

88
import pytest
99
import torch
10-
from lmdeploy import GenerationConfig, PytorchEngineConfig, TurbomindEngineConfig, pipeline
11-
from lmdeploy.messages import Response
12-
from lmdeploy.serve.openai.protocol import UpdateParamsRequest
13-
from lmdeploy.utils import is_bf16_supported
1410
from utils.config_utils import get_parallel_config
1511
from utils.constant import SLEEP_WAKEUP_BACKENDS, SLEEP_WAKEUP_MODEL_LIST
1612
from utils.sleep_utils import (
@@ -25,6 +21,11 @@
2521
resolve_hf_checkpoint_dir,
2622
)
2723

24+
from lmdeploy import GenerationConfig, PytorchEngineConfig, TurbomindEngineConfig, pipeline
25+
from lmdeploy.messages import Response
26+
from lmdeploy.serve.openai.protocol import UpdateParamsRequest
27+
from lmdeploy.utils import is_bf16_supported
28+
2829
_SLEEP_PIPELINE_BACKEND_CLASS = {
2930
'pytorch': PytorchEngineConfig,
3031
'turbomind': TurbomindEngineConfig,

autotest/interface/restful/test_restful_abort_request.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,11 @@
55

66
import pytest
77
import requests
8-
from lmdeploy.serve.openai.api_client import APIClient
98
from utils.constant import BACKEND_LIST, DEFAULT_PORT, DEFAULT_SERVER, RESTFUL_MODEL_LIST
109
from utils.restful_return_check import assert_chat_completions_batch_return
1110

11+
from lmdeploy.serve.openai.api_client import APIClient
12+
1213
BASE_URL = f'http://{DEFAULT_SERVER}:{DEFAULT_PORT}'
1314
JSON_HEADERS = {'Content-Type': 'application/json'}
1415
_REQUEST_TIMEOUT = 300

autotest/interface/restful/test_restful_sleep_wakeup.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
import pytest
55
import requests
66
import torch
7-
from lmdeploy.serve.openai.api_client import APIClient
87
from utils.constant import (
98
DEFAULT_PORT,
109
DEFAULT_SERVER,
@@ -25,6 +24,8 @@
2524
resolve_hf_checkpoint_dir,
2625
)
2726

27+
from lmdeploy.serve.openai.api_client import APIClient
28+
2829
BASE_URL = f'http://{DEFAULT_SERVER}:{DEFAULT_PORT}'
2930
JSON_HEADERS = {'Content-Type': 'application/json'}
3031
_REQUEST_TIMEOUT = 120

autotest/tools/pipeline/mllm_case.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,11 @@
44

55
import fire # noqa: E402
66
import numpy as np # noqa: E402
7+
from PIL import Image # noqa: E402
8+
79
from lmdeploy import GenerationConfig, PytorchEngineConfig, TurbomindEngineConfig, pipeline # noqa: E402
810
from lmdeploy.vl import encode_image_base64, load_image, load_video # noqa: E402
911
from lmdeploy.vl.constants import IMAGE_TOKEN # noqa: E402
10-
from PIL import Image # noqa: E402
1112

1213
gen_config = GenerationConfig(max_new_tokens=500, min_new_tokens=10)
1314

autotest/utils/run_restful_chat.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
import allure
88
import psutil
99
import requests
10-
from lmdeploy.serve.openai.api_client import APIClient
1110
from openai import APIStatusError, BadRequestError, OpenAI
1211
from pytest_assume.plugin import assume
1312
from utils.config_utils import (
@@ -21,6 +20,8 @@
2120
from utils.restful_return_check import assert_chat_completions_batch_return
2221
from utils.rule_condition_assert import assert_result
2322

23+
from lmdeploy.serve.openai.api_client import APIClient
24+
2425
BASE_HTTP_URL = f'http://{DEFAULT_SERVER}'
2526

2627

autotest/utils/sleep_utils.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,11 @@
88
from typing import Any
99

1010
import torch
11-
from lmdeploy.utils import serialize_state_dict
1211
from safetensors.torch import safe_open
1312
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME
1413

14+
from lmdeploy.utils import serialize_state_dict
15+
1516
UPDATE_WEIGHTS_CUDA_DEVICE_ENV = 'LMDEPLOY_UPDATE_WEIGHTS_CUDA_DEVICE'
1617

1718
LEVEL2_GREEDY_MESSAGES = [{'role': 'user', 'content': '424242'}]

0 comments on commit 874cac8

Comments (0)