Description
### Checklist
- 1. I have searched related issues but cannot get the expected help.
- 2. I have read the FAQ documentation but cannot get the expected help.
- 3. The bug has not been fixed in the latest version.
### Describe the bug
I have managed to train a model on a non-square input size (height 1216, width 1920). I optimised this model with mmdeploy and converted it to TensorRT with FP16 precision using the tools/deploy.py script. However, when visualising the sample result, the TensorRT model detects fewer objects than the PyTorch model. I do not believe this is a problem with the optimisation or quantisation itself, because the objects that the TensorRT model does detect have exactly the same locations and confidences as in the PyTorch output. Moreover, the TensorRT model only misses objects in regions where objects are closely and densely packed, which leads me to believe there is a discrepancy in the post-processing pipeline. Please help me identify and fix the problem. I am attaching all the config files below for reference.
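So far the comparison is only visual; to make the gap concrete, a small script along the following lines can count detections from both backends on the same image. This is just a sketch with placeholder paths, and it assumes the mmdet 3.x `init_detector`/`inference_detector` APIs and mmdeploy's `inference_model` are available:

```python
# Rough comparison sketch (paths are placeholders, not the exact files from
# this report): count detections above a score threshold from the PyTorch
# checkpoint and from the TensorRT engine on the same image.
from mmdet.apis import init_detector, inference_detector
from mmdeploy.apis import inference_model

MODEL_CFG = 'config_corrected_det_finetune.py'        # placeholder
DEPLOY_CFG = 'detection_tensorrt-fp16_static-AOI.py'  # placeholder
CHECKPOINT = 'epoch_240.pth'                          # placeholder
BACKEND_FILES = ['end2end.engine']                    # placeholder
IMG = '1105.png'
SCORE_THR = 0.3

# PyTorch reference
torch_model = init_detector(MODEL_CFG, CHECKPOINT, device='cuda:0')
torch_pred = inference_detector(torch_model, IMG).pred_instances
torch_kept = (torch_pred.scores >= SCORE_THR).sum().item()

# TensorRT engine through mmdeploy
trt_pred = inference_model(MODEL_CFG, DEPLOY_CFG, BACKEND_FILES, IMG,
                           device='cuda:0')[0].pred_instances
trt_kept = (trt_pred.scores >= SCORE_THR).sum().item()

print(f'PyTorch detections  >= {SCORE_THR}: {torch_kept}')
print(f'TensorRT detections >= {SCORE_THR}: {trt_kept}')
```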
Model config file

```python
default_scope = 'mmdet'
default_hooks = dict(
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=10),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'))
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer',
vis_backends=[dict(type='LocalVisBackend')],
name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO'
load_from = '/media/chetan/Project/Projects/rtmdet_train/mmdetection/work_dirs/config_corrected_det/epoch_160.pth'
resume = True
train_cfg = dict(
type='EpochBasedTrainLoop',
max_epochs=300,
val_interval=1,
dynamic_intervals=[(80, 1)])
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
dict(
type='LinearLR', start_factor=1e-05, by_epoch=False, begin=0,
end=1000),
dict(
type='CosineAnnealingLR',
eta_min=0.0002,
begin=150,
end=300,
T_max=100,
by_epoch=True,
convert_to_iter_based=True)
]
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.05),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
auto_scale_lr = dict(enable=False, base_batch_size=96)
dataset_type = 'CocoDataset'
data_root = '/home/chetan/Desktop/rtmdet_training/coco_finetuning_data'
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=False,
poly2mask=False),
dict(type='CachedMosaic', img_scale=(1920, 1216), pad_val=114.0, prob=0.2),
dict(
type='RandomResize',
scale=(1920, 1216),
ratio_range=(0.8, 1.2),
keep_ratio=True,
prob=0.1),
dict(
type='RandomCrop',
crop_size=(1920, 1216),
recompute_bbox=True,
allow_negative_crop=True,
prob=0.1),
dict(type='YOLOXHSVRandomAug', prob=0.1),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(1920, 1216), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(1920, 1216),
ratio_range=(1.0, 1.0),
max_cached_images=20,
prob=0.1,
pad_val=(114, 114, 114)),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(type='Resize', scale=(1920, 1216), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=96,
num_workers=8,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=None,
dataset=dict(
type='CocoDataset',
data_root='/home/chetan/Desktop/rtmdet_training/coco_finetuning_data',
ann_file='train/coco_annotations.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=False,
poly2mask=False),
dict(
type='CachedMosaic',
img_scale=(1920, 1216),
pad_val=114.0,
prob=0.2),
dict(
type='RandomResize',
scale=(1920, 1216),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(1920, 1216),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(
type='Pad', size=(1920, 1216),
pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(1920, 1216),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114),
prob=0.2),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
],
backend_args=None,
metainfo=dict(
classes=('Neoplastic', 'Inflammatory', 'Stroma',
'Necrosis/Dead Cells', 'Normal Epithelial'),
palette=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255)])),
pin_memory=True)
val_dataloader = dict(
batch_size=32,
num_workers=8,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='CocoDataset',
data_root='/home/chetan/Desktop/rtmdet_training/coco_finetuning_data',
ann_file='val/coco_annotations.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=[
dict(type='LoadImageFromFile', backend_args=None),
dict(type='Resize', scale=(1920, 1216), keep_ratio=True),
dict(
type='Pad', size=(1920, 1216),
pad_val=dict(img=(114, 114, 114))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
],
backend_args=None,
metainfo=dict(
classes=('Neoplastic', 'Inflammatory', 'Stroma',
'Necrosis/Dead Cells', 'Normal Epithelial'),
palette=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255)])))
test_dataloader = dict(
batch_size=64,
num_workers=8,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='CocoDataset',
data_root='/home/chetan/Desktop/rtmdet_training/coco_finetuning_data',
ann_file='val/coco_annotations.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=[
dict(type='LoadImageFromFile', backend_args=None),
dict(type='Resize', scale=(1920, 1216), keep_ratio=True),
dict(
type='Pad', size=(1920, 1216),
pad_val=dict(img=(114, 114, 114))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
],
backend_args=None,
metainfo=dict(
classes=('Neoplastic', 'Inflammatory', 'Stroma',
'Necrosis/Dead Cells', 'Normal Epithelial'),
palette=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255)])))
val_evaluator = dict(
type='CocoMetric',
ann_file=
'/home/chetan/Desktop/rtmdet_training/coco_finetuning_data/val/coco_annotations.json',
metric='bbox',
format_only=False,
backend_args=None,
proposal_nums=(3000, 1, 10))
test_evaluator = dict(
type='CocoMetric',
ann_file=
'/home/chetan/Desktop/rtmdet_training/coco_finetuning_data/val/coco_annotations.json',
metric='bbox',
format_only=False,
backend_args=None,
proposal_nums=(3000, 1, 10))
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=3000))
img_scales = [(1920, 1216), (256, 256)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[[{
'type': 'Resize',
'scale': (1920, 1216),
'keep_ratio': True,
'prob': 0.0
}, {
'type': 'Resize',
'scale': (144, 128),
'keep_ratio': True,
'prob': 0.0
}, {
'type': 'Resize',
'scale': (576, 512),
'keep_ratio': True,
'prob': 0.0
}],
[{
'type': 'RandomFlip',
'prob': 0.0
}, {
'type': 'RandomFlip',
'prob': 0.0
}],
[{
'type': 'Pad',
'size': (1920, 1216),
'pad_val': {
'img': (114, 114, 114)
},
'prob': 0.0
}],
[{
'type':
'PackDetInputs',
'meta_keys':
('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction')
}]])
]
model = dict(
type='RTMDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[179.92, 149.48, 198.26],
std=[14.06, 11.88, 11.06],
bgr_to_rgb=True,
batch_augments=None),
backbone=dict(
type='CSPNeXt',
arch='P5',
expand_ratio=0.5,
deepen_factor=0.67,
widen_factor=0.75,
channel_attention=True,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU', inplace=True)),
neck=dict(
type='CSPNeXtPAFPN',
in_channels=[192, 384, 768],
out_channels=192,
num_csp_blocks=2,
expand_ratio=0.5,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU', inplace=True)),
bbox_head=dict(
type='RTMDetSepBNHead',
num_classes=5,
in_channels=192,
stacked_convs=2,
feat_channels=192,
anchor_generator=dict(
type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
bbox_coder=dict(type='DistancePointBBoxCoder'),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
with_objectness=False,
exp_on_reg=True,
share_conv=True,
pred_kernel_size=1,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU', inplace=True)),
train_cfg=dict(
assigner=dict(type='DynamicSoftLabelAssigner', topk=13),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=30000,
min_bbox_size=0,
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=3000))
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=False,
poly2mask=False),
dict(
type='RandomResize',
scale=(1920, 1216),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(1920, 1216),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(1920, 1216), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
max_epochs = 300
stage2_num_epochs = 20
base_lr = 0.001
interval = 10
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=[
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=False,
poly2mask=False),
dict(
type='RandomResize',
scale=(1920, 1216),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(1920, 1216),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='YOLOXHSVRandomAug', prob=0.1),
dict(type='RandomFlip', prob=0.5),
dict(
type='Pad', size=(1920, 1216),
pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
])
]
metainfo = dict(
classes=('Neoplastic', 'Inflammatory', 'Stroma', 'Necrosis/Dead Cells',
'Normal Epithelial'),
palette=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255)])
launcher = 'none'
work_dir = './work_dirs/config_corrected_det_finetune'
```

The config file base_static.py

```python
_base_ = ['../../_base_/onnx_config.py']
onnx_config = dict(output_names=['dets', 'labels'], input_shape=None)
codebase_config = dict(
type='mmdet',
task='ObjectDetection',
model_type='end2end',
post_processing=dict(
score_threshold=0.05,
confidence_threshold=0.005, # for YOLOv3
iou_threshold=0.6,
max_output_boxes_per_class=3000,
pre_top_k=5000,
keep_top_k=3000,
background_label_id=-1,
))
```
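Since my suspicion is on the post-processing stage: as far as I understand, the `post_processing` block above is what the exported model uses for NMS instead of the `test_cfg` in the model config, so it may be worth printing the two side by side. A minimal sketch, assuming the standard mmengine `Config` API and placeholder paths:

```python
# Quick check (placeholder paths): print the model's test_cfg next to the
# deploy config's post_processing block so any differences in thresholds or
# top-k limits are easy to spot.
from mmengine.config import Config

model_cfg = Config.fromfile('config_corrected_det_finetune.py')        # placeholder
deploy_cfg = Config.fromfile('detection_tensorrt-fp16_static-AOI.py')  # placeholder

print('model test_cfg        :', dict(model_cfg.model.test_cfg))
print('deploy post_processing:', dict(deploy_cfg.codebase_config.post_processing))
```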
The TensorRT static deployment config

```python
_base_ = ['./base_static.py', '../../_base_/backends/tensorrt.py']
onnx_config = dict(input_shape=(1920, 1216))
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 1216, 1920],
opt_shape=[1, 3, 1216, 1920],
max_shape=[1, 3, 1216, 1920])))
])
```

Below are the deploy.json, detail.json and pipeline.json files.
deploy.json

```json
{
"version": "1.0.0",
"task": "Detector",
"models": [
{
"name": "rtmdet",
"net": "end2end.engine",
"weights": "",
"backend": "tensorrt",
"precision": "FP16",
"batch_size": 1,
"dynamic_shape": false
}
],
"customs": []
}
```

detail.json

```json
{
"version": "1.0.0",
"codebase": {
"task": "ObjectDetection",
"codebase": "mmdet",
"version": "3.0.0",
"pth": "/root/workspace/data/finetune_checkpoint_static/epoch_240.pth",
"config": "/root/workspace/data/finetune_checkpoint_static/config_corrected_det_finetune.py"
},
"codebase_config": {
"type": "mmdet",
"task": "ObjectDetection",
"model_type": "end2end",
"post_processing": {
"score_threshold": 0.05,
"confidence_threshold": 0.005,
"iou_threshold": 0.6,
"max_output_boxes_per_class": 3000,
"pre_top_k": 5000,
"keep_top_k": 3000,
"background_label_id": -1
}
},
"onnx_config": {
"type": "onnx",
"export_params": true,
"keep_initializers_as_inputs": false,
"opset_version": 11,
"save_file": "end2end.onnx",
"input_names": [
"input"
],
"output_names": [
"dets",
"labels"
],
"input_shape": [
1920,
1216
],
"optimize": true
},
"backend_config": {
"type": "tensorrt",
"common_config": {
"fp16_mode": true,
"max_workspace_size": 1073741824
},
"model_inputs": [
{
"input_shapes": {
"input": {
"min_shape": [
1,
3,
1216,
1920
],
"opt_shape": [
1,
3,
1216,
1920
],
"max_shape": [
1,
3,
1216,
1920
]
}
}
}
]
},
"calib_config": {}
}
```

And finally, the pipeline.json

```json
{
"pipeline": {
"input": [
"img"
],
"output": [
"post_output"
],
"tasks": [
{
"type": "Task",
"module": "Transform",
"name": "Preprocess",
"input": [
"img"
],
"output": [
"prep_output"
],
"transforms": [
{
"type": "LoadImageFromFile",
"backend_args": null
},
{
"type": "Resize",
"keep_ratio": false,
"size": [
1920,
1216
]
},
{
"type": "Normalize",
"to_rgb": true,
"mean": [
179.92,
149.48,
198.26
],
"std": [
14.06,
11.88,
11.06
]
},
{
"type": "Pad",
"size_divisor": 1
},
{
"type": "DefaultFormatBundle"
},
{
"type": "Collect",
"meta_keys": [
"flip",
"img_shape",
"scale_factor",
"flip_direction",
"filename",
"img_path",
"img_id",
"img_norm_cfg",
"valid_ratio",
"pad_param",
"pad_shape",
"ori_filename",
"ori_shape"
],
"keys": [
"img"
]
}
]
},
{
"name": "rtmdet",
"type": "Task",
"module": "Net",
"is_batched": false,
"input": [
"prep_output"
],
"output": [
"infer_output"
],
"input_map": {
"img": "input"
},
"output_map": {}
},
{
"type": "Task",
"module": "mmdet",
"name": "postprocess",
"component": "ResizeBBox",
"params": {
"nms_pre": 30000,
"min_bbox_size": 0,
"score_thr": 0.001,
"nms": {
"type": "nms",
"iou_threshold": 0.6
},
"max_per_img": 3000
},
"output": [
"post_output"
],
"input": [
"prep_output",
"infer_output"
]
}
]
}
}
```
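For completeness, a rough sketch of running the exported work-dir through the MMDeploy SDK Python bindings, to check whether the same detections go missing there as well (this assumes the `mmdeploy_runtime` package is installed; the paths are the ones from this report and may need adjusting):

```python
# Rough SDK sketch: run the dumped work-dir through the MMDeploy runtime
# Detector and count detections above a score threshold.
import cv2
from mmdeploy_runtime import Detector

detector = Detector(
    model_path='/root/workspace/data/finetune_checkpoint_static',  # dir containing deploy.json etc.
    device_name='cuda',
    device_id=0)

img = cv2.imread('/root/workspace/data/1105.png')
bboxes, labels, _ = detector(img)  # bboxes: (N, 5); the last column is the score
print('SDK detections >= 0.3:', int((bboxes[:, 4] >= 0.3).sum()))
```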
### Reproduction

```shell
python /root/workspace/mmdeploy/tools/deploy.py \
/root/workspace/mmdeploy/configs/mmdet/detection/detection_tensorrt-fp16_static-AOI.py \
/root/workspace/data/finetune_checkpoint_static/config_corrected_det_finetune.py \
/root/workspace/data/finetune_checkpoint_static/epoch_240.pth \
/root/workspace/data/1105.png \
--test-img /root/workspace/data/1105.png \
--work-dir /root/workspace/data/finetune_checkpoint_static \
--device cuda \
--log-level INFO \
--show \
--dump-info
```

I modified the config files to accommodate the 1216 x 1920 resolution. I understand all the changes that were required, and the PyTorch model works flawlessly. However, the TensorRT-optimised model fails to predict some objects that are densely located.
### Environment

```
05/11 09:00:49 - mmengine - INFO -
05/11 09:00:49 - mmengine - INFO - **********Environmental information**********
05/11 09:00:50 - mmengine - INFO - sys.platform: linux
05/11 09:00:50 - mmengine - INFO - Python: 3.8.10 (default, Mar 15 2022, 12:22:08) [GCC 9.4.0]
05/11 09:00:50 - mmengine - INFO - CUDA available: True
05/11 09:00:50 - mmengine - INFO - numpy_random_seed: 2147483648
05/11 09:00:50 - mmengine - INFO - GPU 0: NVIDIA GeForce GTX 1650
05/11 09:00:50 - mmengine - INFO - CUDA_HOME: /usr/local/cuda
05/11 09:00:50 - mmengine - INFO - NVCC: Cuda compilation tools, release 11.6, V11.6.124
05/11 09:00:50 - mmengine - INFO - GCC: x86_64-linux-gnu-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
05/11 09:00:50 - mmengine - INFO - PyTorch: 1.11.0+cu113
05/11 09:00:50 - mmengine - INFO - PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- LAPACK is enabled (usually provided by MKL)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.3
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.2
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF,
05/11 09:00:50 - mmengine - INFO - TorchVision: 0.12.0+cu113
05/11 09:00:50 - mmengine - INFO - OpenCV: 4.7.0
05/11 09:00:50 - mmengine - INFO - MMEngine: 0.7.3
05/11 09:00:50 - mmengine - INFO - MMCV: 2.0.0
05/11 09:00:50 - mmengine - INFO - MMCV Compiler: GCC 9.3
05/11 09:00:50 - mmengine - INFO - MMCV CUDA Compiler: 11.3
05/11 09:00:50 - mmengine - INFO - MMDeploy: 1.0.0+
05/11 09:00:50 - mmengine - INFO -
05/11 09:00:50 - mmengine - INFO - **********Backend information**********
05/11 09:00:50 - mmengine - INFO - tensorrt: 8.2.4.2
05/11 09:00:50 - mmengine - INFO - tensorrt custom ops: Available
05/11 09:00:50 - mmengine - INFO - ONNXRuntime: None
05/11 09:00:50 - mmengine - INFO - ONNXRuntime-gpu: 1.8.1
05/11 09:00:50 - mmengine - INFO - ONNXRuntime custom ops: Available
05/11 09:00:50 - mmengine - INFO - pplnn: None
05/11 09:00:50 - mmengine - INFO - ncnn: None
05/11 09:00:50 - mmengine - INFO - snpe: None
05/11 09:00:50 - mmengine - INFO - openvino: None
05/11 09:00:50 - mmengine - INFO - torchscript: 1.11.0+cu113
05/11 09:00:50 - mmengine - INFO - torchscript custom ops: NotAvailable
05/11 09:00:50 - mmengine - INFO - rknn-toolkit: None
05/11 09:00:50 - mmengine - INFO - rknn-toolkit2: None
05/11 09:00:50 - mmengine - INFO - ascend: None
05/11 09:00:50 - mmengine - INFO - coreml: None
05/11 09:00:50 - mmengine - INFO - tvm: None
05/11 09:00:50 - mmengine - INFO - vacc: None
05/11 09:00:50 - mmengine - INFO -
05/11 09:00:50 - mmengine - INFO - **********Codebase information**********
05/11 09:00:50 - mmengine - INFO - mmdet: 3.0.0
05/11 09:00:50 - mmengine - INFO - mmseg: None
05/11 09:00:50 - mmengine - INFO - mmpretrain: None
05/11 09:00:50 - mmengine - INFO - mmocr: None
05/11 09:00:50 - mmengine - INFO - mmedit: None
05/11 09:00:50 - mmengine - INFO - mmdet3d: None
05/11 09:00:50 - mmengine - INFO - mmpose: None
05/11 09:00:50 - mmengine - INFO - mmrotate: None
05/11 09:00:50 - mmengine - INFO - mmaction: None
05/11 09:00:50 - mmengine - INFO - mmrazor: None
```

### Error traceback
_No response_