Commit 8f0518c

svekars and titaiwangms authored
Cherry-pick: [ONNX] Update dynamo_export tutorial (#3196) (#3265)
Co-authored-by: Ti-Tai Wang <titaiwang@microsoft.com>
1 parent c2faee4 commit 8f0518c

12 files changed: 15 additions & 159 deletions

.jenkins/validate_tutorials_built.py

Lines changed: 0 additions & 2 deletions
@@ -52,8 +52,6 @@
     "intermediate_source/tensorboard_profiler_tutorial", # reenable after 2.0 release.
     "intermediate_source/torch_export_tutorial", # reenable after 2940 is fixed.
     "advanced_source/pendulum",
-    "beginner_source/onnx/export_simple_model_to_onnx_tutorial",
-    "beginner_source/onnx/onnx_registry_tutorial"
 ]
 
 def tutorial_source_dirs() -> List[Path]:
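Dropping these two entries from the skip list means the ONNX export and registry tutorials are executed again when the tutorial build is validated. As a rough illustration of how such a skip list can be consumed, here is a minimal sketch in Python; the list variable and the filtering helper below are hypothetical and not the actual API of validate_tutorials_built.py.

from pathlib import Path
from typing import List

# Hypothetical skip list in the style of the one edited above (illustrative subset).
SKIPPED_TUTORIALS = [
    "intermediate_source/torch_export_tutorial",  # reenable after 2940 is fixed.
    "advanced_source/pendulum",
]

def runnable_tutorials(source_dirs: List[Path]) -> List[Path]:
    # Hypothetical helper: keep every tutorial whose path is not on the skip list.
    kept = []
    for source_dir in source_dirs:
        for path in sorted(source_dir.rglob("*.py")):
            key = path.with_suffix("").as_posix()
            if not any(key.endswith(skipped) for skipped in SKIPPED_TUTORIALS):
                kept.append(path)
    return kept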
Binary image files changed (38.1 KB, -7.37 KB, -15.9 KB, -8.41 KB, -381 Bytes); binary files not shown.

beginner_source/onnx/export_simple_model_to_onnx_tutorial.py

Lines changed: 5 additions & 5 deletions
@@ -7,7 +7,7 @@
 Export a PyTorch model to ONNX
 ==============================
 
-**Author**: `Thiago Crepaldi <https://github.com/thiagocrepaldi>`_
+**Author**: `Ti-Tai Wang <https://github.com/titaiwangms>`_ and `Xavier Dupré <https://github.com/xadupre>`_
 
 .. note::
     As of PyTorch 2.1, there are two versions of ONNX Exporter.
@@ -127,7 +127,7 @@ def forward(self, x):
 # Once Netron is open, we can drag and drop our ``my_image_classifier.onnx`` file into the browser or select it after
 # clicking the **Open model** button.
 #
-# .. image:: ../../_static/img/onnx/image_clossifier_onnx_modelon_netron_web_ui.png
+# .. image:: ../../_static/img/onnx/image_classifier_onnx_model_on_netron_web_ui.png
 #    :width: 50%
 #
 #
@@ -155,7 +155,7 @@ def forward(self, x):
 
 import onnxruntime
 
-onnx_input = onnx_program.adapt_torch_inputs_to_onnx(torch_input)
+onnx_input = [torch_input]
 print(f"Input length: {len(onnx_input)}")
 print(f"Sample input: {onnx_input}")
 
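With this change the adapt_torch_inputs_to_onnx helper is gone: the ONNX Runtime inputs are simply the original positional PyTorch inputs collected in a list. The names the hunk relies on (torch_model, torch_input, onnx_program) are defined earlier in the tutorial; below is a minimal sketch of that assumed setup, using a placeholder model rather than the tutorial's image classifier and assuming PyTorch 2.1+ where torch.onnx.dynamo_export is available.

import torch

# Placeholder model standing in for the tutorial's image-classifier module.
class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(32, 10)

    def forward(self, x):
        return self.linear(x)

torch_model = TinyModel()
torch_input = torch.randn(1, 32)

# Export with the dynamo-based exporter and save the model to disk.
onnx_program = torch.onnx.dynamo_export(torch_model, torch_input)
onnx_program.save("my_image_classifier.onnx")

# ONNX Runtime inputs are now just the positional torch inputs, in order.
onnx_input = [torch_input]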

@@ -166,7 +166,8 @@ def to_numpy(tensor):
 
 onnxruntime_input = {k.name: to_numpy(v) for k, v in zip(ort_session.get_inputs(), onnx_input)}
 
-onnxruntime_outputs = ort_session.run(None, onnxruntime_input)
+# onnxruntime returns a list of outputs
+onnxruntime_outputs = ort_session.run(None, onnxruntime_input)[0]
 
 ####################################################################
 # 7. Compare the PyTorch results with the ones from the ONNX Runtime
@@ -179,7 +180,6 @@ def to_numpy(tensor):
 # Before comparing the results, we need to convert the PyTorch's output to match ONNX's format.
 
 torch_outputs = torch_model(torch_input)
-torch_outputs = onnx_program.adapt_torch_outputs_to_onnx(torch_outputs)
 
 assert len(torch_outputs) == len(onnxruntime_outputs)
 for torch_output, onnxruntime_output in zip(torch_outputs, onnxruntime_outputs):
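Taken together, the last three hunks mean the comparison no longer round-trips through the adapt_torch_*_to_onnx helpers: the exported model is fed the raw inputs, and its first (and only) ONNX Runtime output is compared against the eager PyTorch result. Below is a minimal sketch of that flow, continuing the placeholder setup above; the CPU provider choice and the single-output comparison via torch.testing.assert_close are assumptions, not necessarily the tutorial's exact code.

import onnxruntime
import torch

def to_numpy(tensor):
    # Detach tensors that require grad, then move to CPU and convert to numpy.
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

# Run the saved model with ONNX Runtime (CPU provider assumed).
ort_session = onnxruntime.InferenceSession(
    "my_image_classifier.onnx", providers=["CPUExecutionProvider"]
)

# Map ONNX graph input names to numpy arrays, in positional order.
onnxruntime_input = {
    k.name: to_numpy(v) for k, v in zip(ort_session.get_inputs(), onnx_input)
}

# onnxruntime returns a list of outputs; this model has a single output tensor.
onnxruntime_output = ort_session.run(None, onnxruntime_input)[0]

# Compare against the eager PyTorch result.
torch_output = torch_model(torch_input)
torch.testing.assert_close(torch_output, torch.tensor(onnxruntime_output))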
