From 5536bf327ce2297aad5b0072be95a5c1f0adfd93 Mon Sep 17 00:00:00 2001
From: "Cui, Yifeng"
Date: Mon, 23 Jun 2025 23:12:20 -0700
Subject: [PATCH] Set strict export explicitly for API change

---
 docs/source/tutorials_source/pt2e_quant_x86_inductor.rst | 5 +++--
 docs/source/tutorials_source/pt2e_quant_xpu_inductor.rst | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/source/tutorials_source/pt2e_quant_x86_inductor.rst b/docs/source/tutorials_source/pt2e_quant_x86_inductor.rst
index e4faec469f..f968032fa3 100644
--- a/docs/source/tutorials_source/pt2e_quant_x86_inductor.rst
+++ b/docs/source/tutorials_source/pt2e_quant_x86_inductor.rst
@@ -104,7 +104,8 @@ We will start by performing the necessary imports, capturing the FX Graph from t
     # Note: requires torch >= 2.6
     exported_model = export(
         model,
-        example_inputs
+        example_inputs,
+        strict=True
     )
 
 
@@ -266,7 +267,7 @@ The PyTorch 2 Export QAT flow is largely similar to the PTQ flow:
   # Step 1. program capture
   # NOTE: this API will be updated to torch.export API in the future, but the captured
   # result shoud mostly stay the same
-  exported_model = export(m, example_inputs)
+  exported_model = export(m, example_inputs, strict=True)
   # we get a model with aten ops
 
   # Step 2. quantization-aware training
diff --git a/docs/source/tutorials_source/pt2e_quant_xpu_inductor.rst b/docs/source/tutorials_source/pt2e_quant_xpu_inductor.rst
index a0901291e9..d1540a9777 100644
--- a/docs/source/tutorials_source/pt2e_quant_xpu_inductor.rst
+++ b/docs/source/tutorials_source/pt2e_quant_xpu_inductor.rst
@@ -85,6 +85,7 @@ We will start by performing the necessary imports, capturing the FX Graph from t
     exported_model = export(
         model,
         example_inputs,
+        strict=True
     ).module()
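
For context, below is a minimal standalone sketch (not part of the patch or of the patched tutorials) of the pattern the patch makes explicit: passing strict=True to torch.export.export rather than relying on the default. ToyModel and its example inputs are illustrative placeholders, and the comment about the default of strict changing is inferred from the commit subject, not from the patch itself.

    # Minimal sketch: exporting a toy model with an explicit strict=True,
    # mirroring the updated tutorial snippets. Assumes torch >= 2.6.
    import torch
    from torch.export import export

    class ToyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(8, 4)

        def forward(self, x):
            return torch.relu(self.linear(x))

    model = ToyModel().eval()
    example_inputs = (torch.randn(2, 8),)

    # Passing strict=True keeps TorchDynamo-based (strict) graph capture
    # even if a torch release changes the default value of `strict`.
    exported_model = export(model, example_inputs, strict=True)
    print(exported_model)

The resulting exported program can then be handed to the PT2E prepare/convert quantization steps exactly as the tutorials already do.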