
Commit 01315e5

snnn and adrianlizarraga authored and committed
Move Linux CUDA pipelines to H100 (#25523)
1 parent: d3d3834

3 files changed: +7 additions, -7 deletions

.github/workflows/linux_cuda_ci.yml

Lines changed: 3 additions & 3 deletions

@@ -28,7 +28,7 @@ jobs:
   dockerfile_path: tools/ci_build/github/linux/docker/Dockerfile.manylinux2_28_cuda
   docker_build_args: '--build-arg BASEIMAGE=onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda12_x64_ubi8_gcc12:20250124.1'
   docker_image_repo: onnxruntimecuda12manylinuxbuild
-  extra_build_flags: '--use_binskim_compliant_compile_flags --build_wheel --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --enable_cuda_profiling --build_java --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=80 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
+  extra_build_flags: '--use_binskim_compliant_compile_flags --build_wheel --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --enable_cuda_profiling --build_java --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=90 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
   python_path_prefix: 'PATH=/opt/python/cp310-cp310/bin:$PATH'
   run_tests: false # <<< Do not run tests in this job
   upload_build_output: true # <<< Upload the build/Release directory
@@ -41,7 +41,7 @@ jobs:
   needs: build-linux-cuda-x64-release
   runs-on:
     - self-hosted
-    - "1ES.Pool=Onnxruntime-github-Linux-GPU-A100-WUS3"
+    - "1ES.Pool=Onnxruntime-github-Linux-GPU-H100"
   permissions:
     contents: read
     packages: read
@@ -98,5 +98,5 @@ jobs:
   build_config: Release
   mode: 'test' # Set mode to test
   execution_providers: 'cuda'
-  extra_build_flags: '--use_binskim_compliant_compile_flags --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --enable_cuda_profiling --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=80 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
+  extra_build_flags: '--use_binskim_compliant_compile_flags --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --enable_cuda_profiling --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=90 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
   python_path_prefix: 'PATH=/opt/python/cp310-cp310/bin:$PATH'
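Note: CMAKE_CUDA_ARCHITECTURES=80 generates native kernels for compute capability 8.0 (Ampere, e.g. A100), while 90 targets 9.0 (Hopper, e.g. H100), so the flag is bumped in lockstep with the runner pool. As a side illustration (not part of this change), a small host-side C++ program can confirm which architecture the GPU on a runner actually reports:

// check_cc.cc - illustrative sketch only; build with: nvcc check_cc.cc -o check_cc
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  cudaDeviceProp prop{};
  if (cudaGetDeviceProperties(&prop, /*device=*/0) != cudaSuccess) {
    std::fprintf(stderr, "no CUDA device visible\n");
    return 1;
  }
  // An A100 reports 8.0 (sm_80); an H100 reports 9.0 (sm_90).
  std::printf("compute capability: %d.%d\n", prop.major, prop.minor);
  return 0;
}

A wheel built only for sm_80 could still load on an H100 if PTX is embedded, but building for sm_90 keeps the CI binaries native to the hardware the tests now run on.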

.github/workflows/linux_tensorrt_ci.yml

Lines changed: 3 additions & 3 deletions

@@ -28,7 +28,7 @@ jobs:
   dockerfile_path: tools/ci_build/github/linux/docker/Dockerfile.manylinux2_28_cuda
   docker_build_args: '--build-arg BASEIMAGE=onnxruntimebuildcache.azurecr.io/internal/azureml/onnxruntime/build/cuda12_x64_ubi8_gcc12:20250124.1 --build-arg TRT_VERSION=10.9.0.34-1.cuda12.8 --network=host'
   docker_image_repo: onnxruntimetensorrt86gpubuild
-  extra_build_flags: '--use_binskim_compliant_compile_flags --build_wheel --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --use_tensorrt --tensorrt_home /usr --build_java --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=80 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
+  extra_build_flags: '--use_binskim_compliant_compile_flags --build_wheel --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --use_tensorrt --tensorrt_home /usr --build_java --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=90 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
   python_path_prefix: 'PATH=/opt/python/cp310-cp310/bin:$PATH'
   run_tests: false # <<< Do not run tests in this job
   upload_build_output: true # <<< Upload the build/Release directory
@@ -41,7 +41,7 @@ jobs:
   needs: build-linux-TensorRT-x64-release
   runs-on:
     - self-hosted
-    - "1ES.Pool=Onnxruntime-github-Linux-GPU-A100-WUS3"
+    - "1ES.Pool=Onnxruntime-github-Linux-GPU-H100"
   permissions:
     contents: read
     packages: read
@@ -100,5 +100,5 @@ jobs:
   build_config: Release
   mode: 'test' # Set mode to test
   execution_providers: 'cuda tensorrt'
-  extra_build_flags: '--use_binskim_compliant_compile_flags --build_wheel --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --use_tensorrt --tensorrt_home /usr --build_java --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=80 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
+  extra_build_flags: '--use_binskim_compliant_compile_flags --build_wheel --cuda_version=12.2 --cuda_home=/usr/local/cuda-12.2 --cudnn_home=/usr/local/cuda-12.2 --use_tensorrt --tensorrt_home /usr --build_java --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=90 onnxruntime_BUILD_UNIT_TESTS=ON onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON'
   python_path_prefix: 'PATH=/opt/python/cp310-cp310/bin:$PATH'
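The TensorRT workflow receives the same two substitutions (native sm_90 kernels, H100 runner pool), and its test job exercises both providers (execution_providers: 'cuda tensorrt'). For a quick sanity check that a given onnxruntime build actually registered the CUDA and TensorRT execution providers, the public C++ API can be queried; this is a generic sketch against an installed onnxruntime package, not code introduced by this commit:

// list_providers.cc - illustrative sketch; compile and link against libonnxruntime.
#include <onnxruntime_cxx_api.h>
#include <cstdio>
#include <string>

int main() {
  // Lists the execution providers compiled into this build, e.g.
  // TensorrtExecutionProvider, CUDAExecutionProvider, CPUExecutionProvider.
  for (const std::string& provider : Ort::GetAvailableProviders()) {
    std::printf("%s\n", provider.c_str());
  }
  return 0;
}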

onnxruntime/test/providers/cuda/nhwc/nhwc_cuda_helper.h

Lines changed: 1 addition & 1 deletion

@@ -46,7 +46,7 @@
   } else if (std::is_same<T, double>::value) {  \
     MAKE_PROVIDERS_EPS_EXT(2e-4, pad_to_nc1d)   \
   } else {                                      \
-    MAKE_PROVIDERS_EPS_EXT(2e-3, pad_to_nc1d)   \
+    MAKE_PROVIDERS_EPS_EXT(4e-3, pad_to_nc1d)   \
   }

 #define MAKE_PROVIDERS_EPS_TYPE(T) \
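The NHWC test helper selects a comparison epsilon per data type; this hunk doubles the tolerance in the fallback branch (types other than double, typically the half-precision path) from 2e-3 to 4e-3, presumably to absorb small numeric differences seen on the H100 pool. The MAKE_PROVIDERS_EPS_EXT plumbing lives elsewhere in the test framework; purely as an illustration of how widening an epsilon changes a comparison (helper names below are hypothetical, not the repo's):

// tolerance_sketch.cc - hypothetical illustration, not onnxruntime test code.
#include <cmath>
#include <cstdio>

// Absolute-error check; the real framework may also apply a relative tolerance.
static bool NearlyEqual(float expected, float actual, float eps) {
  return std::fabs(expected - actual) <= eps;
}

int main() {
  const float expected = 1.2500f;
  const float actual = 1.2531f;  // differs by ~3.1e-3, plausible for fp16 accumulation
  std::printf("eps=2e-3: %s\n", NearlyEqual(expected, actual, 2e-3f) ? "pass" : "fail");
  std::printf("eps=4e-3: %s\n", NearlyEqual(expected, actual, 4e-3f) ? "pass" : "fail");
  return 0;
}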

0 commit comments
