diff --git a/.ci/scripts/test_model.sh b/.ci/scripts/test_model.sh
index bc9bbb8bae0..c8e2f708878 100755
--- a/.ci/scripts/test_model.sh
+++ b/.ci/scripts/test_model.sh
@@ -91,11 +91,10 @@ test_model() {
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
   fi
-  STRICT="--strict"
+  STRICT="--no-strict"
   if [[ "${MODEL_NAME}" == "llava" ]]; then
     # Install requirements for llava
     bash examples/models/llava/install_requirements.sh
-    STRICT="--no-strict"
   fi
   if [[ "${MODEL_NAME}" == "qwen2_5" ]]; then
     # Install requirements for export_llama
diff --git a/examples/xnnpack/aot_compiler.py b/examples/xnnpack/aot_compiler.py
index f67150169dc..79496c82a58 100644
--- a/examples/xnnpack/aot_compiler.py
+++ b/examples/xnnpack/aot_compiler.py
@@ -87,14 +87,14 @@
     model = model.eval()
     # pre-autograd export. eventually this will become torch.export
-    ep = torch.export.export_for_training(model, example_inputs, strict=True)
+    ep = torch.export.export_for_training(model, example_inputs, strict=False)
     model = ep.module()
 
     if args.quantize:
         logging.info("Quantizing Model...")
         # TODO(T165162973): This pass shall eventually be folded into quantizer
         model = quantize(model, example_inputs, quant_type)
-        ep = torch.export.export_for_training(model, example_inputs, strict=True)
+        ep = torch.export.export_for_training(model, example_inputs, strict=False)
 
     edge = to_edge_transform_and_lower(
         ep,
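Both hunks flip export from strict to non-strict mode: the CI script now passes `--no-strict` for every model rather than only for `llava`, and the XNNPACK AOT example calls `torch.export.export_for_training` with `strict=False` for both the plain and the quantized export. Below is a minimal sketch of the Python-side change in isolation, assuming a PyTorch build (>= 2.5) that provides `torch.export.export_for_training`; `TinyModel` and its inputs are hypothetical stand-ins, not ExecuTorch code.

```python
# Minimal sketch, not ExecuTorch code: shows the strict=False export
# call that this diff switches to. Assumes torch >= 2.5.
import torch


class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)


model = TinyModel().eval()
example_inputs = (torch.randn(1, 4),)

# strict=False selects non-strict export: the module is traced by running
# it through the normal Python interpreter instead of under TorchDynamo,
# which tolerates Python constructs that strict capture rejects.
ep = torch.export.export_for_training(model, example_inputs, strict=False)

# The exported program can still be turned back into a callable module.
print(ep.module()(torch.randn(1, 4)).shape)
```

The trade-off, as I understand it: non-strict export accepts more Python-level code in `forward`, at the cost of the stricter soundness checks that TorchDynamo-based capture performs on data-dependent control flow.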