Skip to content

Commit c16b645

Browse files
committed
Run test_models.sh with strict=False flag
This is the recommended approach anyway, so let's gradually start migrating to strict=False
1 parent a1e3d48 commit c16b645

File tree

2 files changed

+3
-4
lines changed

2 files changed

+3
-4
lines changed

.ci/scripts/test_model.sh

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,11 +91,10 @@ test_model() {
9191
run_portable_executor_runner
9292
rm "./${MODEL_NAME}.pte"
9393
fi
94-
STRICT="--strict"
94+
STRICT="--no-strict"
9595
if [[ "${MODEL_NAME}" == "llava" ]]; then
9696
# Install requirements for llava
9797
bash examples/models/llava/install_requirements.sh
98-
STRICT="--no-strict"
9998
fi
10099
if [[ "${MODEL_NAME}" == "qwen2_5" ]]; then
101100
# Install requirements for export_llama

examples/xnnpack/aot_compiler.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -87,14 +87,14 @@
8787

8888
model = model.eval()
8989
# pre-autograd export. eventually this will become torch.export
90-
ep = torch.export.export_for_training(model, example_inputs, strict=True)
90+
ep = torch.export.export_for_training(model, example_inputs, strict=False)
9191
model = ep.module()
9292

9393
if args.quantize:
9494
logging.info("Quantizing Model...")
9595
# TODO(T165162973): This pass shall eventually be folded into quantizer
9696
model = quantize(model, example_inputs, quant_type)
97-
ep = torch.export.export_for_training(model, example_inputs, strict=True)
97+
ep = torch.export.export_for_training(model, example_inputs, strict=False)
9898

9999
edge = to_edge_transform_and_lower(
100100
ep,

0 commit comments

Comments (0)