From c16b6458d4f75616e6c37489240588bef90e296a Mon Sep 17 00:00:00 2001
From: Mergen Nachin
Date: Thu, 10 Jul 2025 17:22:00 -0400
Subject: [PATCH] Run test_models.sh with strict=False flag

This is the recommended approach anyway, so let's gradually start migrating
to strict=False
---
 .ci/scripts/test_model.sh        | 3 +--
 examples/xnnpack/aot_compiler.py | 4 ++--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/.ci/scripts/test_model.sh b/.ci/scripts/test_model.sh
index bc9bbb8bae0..c8e2f708878 100755
--- a/.ci/scripts/test_model.sh
+++ b/.ci/scripts/test_model.sh
@@ -91,11 +91,10 @@ test_model() {
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
   fi
-  STRICT="--strict"
+  STRICT="--no-strict"
   if [[ "${MODEL_NAME}" == "llava" ]]; then
     # Install requirements for llava
     bash examples/models/llava/install_requirements.sh
-    STRICT="--no-strict"
   fi
   if [[ "${MODEL_NAME}" == "qwen2_5" ]]; then
     # Install requirements for export_llama
diff --git a/examples/xnnpack/aot_compiler.py b/examples/xnnpack/aot_compiler.py
index f67150169dc..79496c82a58 100644
--- a/examples/xnnpack/aot_compiler.py
+++ b/examples/xnnpack/aot_compiler.py
@@ -87,14 +87,14 @@

     model = model.eval()
     # pre-autograd export. eventually this will become torch.export
-    ep = torch.export.export_for_training(model, example_inputs, strict=True)
+    ep = torch.export.export_for_training(model, example_inputs, strict=False)
     model = ep.module()

     if args.quantize:
         logging.info("Quantizing Model...")
         # TODO(T165162973): This pass shall eventually be folded into quantizer
         model = quantize(model, example_inputs, quant_type)
-        ep = torch.export.export_for_training(model, example_inputs, strict=True)
+        ep = torch.export.export_for_training(model, example_inputs, strict=False)

     edge = to_edge_transform_and_lower(
         ep,
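
Illustration (not part of the patch): the change moves torch.export.export_for_training
from strict=True (TorchDynamo-based graph capture) to strict=False (the more permissive
non-strict tracing path). Below is a minimal, self-contained sketch of the call being
changed; TinyModel and its example inputs are made up for illustration, and only the
strict=False argument mirrors the patch.

    import torch

    # Hypothetical stand-in module; the real examples load models from
    # ExecuTorch's model registry instead.
    class TinyModel(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x + 1)

    model = TinyModel().eval()
    example_inputs = (torch.randn(2, 3),)

    # strict=False traces the module without requiring TorchDynamo to fully
    # capture the Python code, so it tolerates constructs that can fail strict
    # export; the result is still an ExportedProgram.
    ep = torch.export.export_for_training(model, example_inputs, strict=False)
    print(ep.module()(torch.randn(2, 3)).shape)

The downstream calls in aot_compiler.py (quantize, to_edge_transform_and_lower) consume
the resulting ExportedProgram the same way regardless of which tracing mode produced it.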