Describe the issue
WebGPU and CPU execution providers produce different outputs given the same input.
To reproduce
# Reproduction script: build a minimal ONNX graph that computes
# Y = int64(Ceil(Shape(X)[1] / 15)) and run it on both the CPU and
# WebGPU execution providers to compare their outputs.
import onnxruntime as ort
import numpy as np
from onnx import helper, TensorProto, numpy_helper

# Input has a dynamic second dimension (None) so the Shape/Gather chain
# must be evaluated at run time rather than constant-folded.
X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, None, 160])

# Pipeline: Shape -> Gather(axis=0) -> Cast(float) -> Div -> Ceil -> Cast(int64)
nodes = [
    helper.make_node("Shape", inputs=["X"], outputs=["shape_out"]),
    helper.make_node("Gather", inputs=["shape_out", "index"], outputs=["dim1"], axis=0),
    helper.make_node("Cast", inputs=["dim1"], outputs=["dim1_float"], to=TensorProto.FLOAT),
    helper.make_node("Div", inputs=["dim1_float", "divisor"], outputs=["div_out"]),
    helper.make_node("Ceil", inputs=["div_out"], outputs=["ceil_out"]),
    helper.make_node("Cast", inputs=["ceil_out"], outputs=["Y"], to=TensorProto.INT64),
]

graph = helper.make_graph(
    nodes,
    "shape_div_ceil_test",
    [X],
    [helper.make_tensor_value_info("Y", TensorProto.INT64, [])],
    [
        # Graph initializers: gather index 1 (the dynamic dim) and the divisor.
        numpy_helper.from_array(np.int64(1), name="index"),
        numpy_helper.from_array(np.float32(15.0), name="divisor"),
    ],
)

model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 19)])
model.ir_version = 9
model_bytes = model.SerializeToString()

# dim1 = 165 and 165 / 15 == 11 exactly, so Ceil must yield 11 (not 12).
feed = {"X": np.random.randn(1, 165, 160).astype(np.float32)}

opts = ort.SessionOptions()
# opts.log_severity_level = 3 # suppress warnings

for provider in ["CPUExecutionProvider", "WebGpuExecutionProvider"]:
    sess = ort.InferenceSession(model_bytes, sess_options=opts, providers=[provider])
    [result] = sess.run(None, feed)
    label = "CPU" if "CPU" in provider else "GPU"
    status = "PASS" if result == 11 else "FAIL"
    print(f" {label}: Ceil(165/15) = {result} ({status})")
Running this script produces:
CPU: Ceil(165/15) = 11 (PASS)
GPU: Ceil(165/15) = 12 (FAIL)
Urgency
No response
ONNX Runtime Installation
Built from Source
ONNX Runtime Version or Commit ID
main
Execution Provider
'webgpu' (WebGPU)
Describe the issue
WebGPU and CPU execution providers produce different outputs given the same input.
To reproduce
produces
Urgency
No response
ONNX Runtime Installation
Built from Source
ONNX Runtime Version or Commit ID
main
Execution Provider
'webgpu' (WebGPU)