From d37a7fbdd551440937c4846ef2f44f715f046ff9 Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Thu, 12 Jun 2025 19:16:18 +0000
Subject: [PATCH 1/8] update.

---
 beginner_source/basics/autogradqs_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 8eff127ddee..e702b29810f 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -116,7 +116,7 @@

 with torch.no_grad():
     z = torch.matmul(x, w)+b
-print(z.requires_grad)
+prin(z.requires_grad)


 ######################################################################

From ad5eb25b139fc3bfd9a01a020b8d220093b13f3d Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Fri, 13 Jun 2025 09:38:48 +0000
Subject: [PATCH 2/8] update the autograd tutorial.

---
 beginner_source/basics/autogradqs_tutorial.py | 38 ++++++++++++++++++-
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index e702b29810f..418de49cc6f 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -116,7 +116,7 @@

 with torch.no_grad():
     z = torch.matmul(x, w)+b
-prin(z.requires_grad)
+print(z.requires_grad)


 ######################################################################
@@ -133,7 +133,8 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 # not track gradients would be more efficient.
-
+# For additional reference, you can view the autograd mechanics 
+# documentation:https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation

 ######################################################################
@@ -160,6 +161,39 @@
 # - accumulates them in the respective tensor’s ``.grad`` attribute
 # - using the chain rule, propagates all the way to the leaf tensors.
 #
+# We can also visualize the computational graph by the following 2 methods:
+#
+# 1. TORCH_LOGS="+autograd"
+# By setting the TORCH_LOGS="+autograd" environment variable, we can enable runtime autograd logs for debugging.
+#
+# We can perform the logging in the following manner:
+# TORCH_LOGS="+autograd" python test.py
+#
+# 2. Torchviz
+# Torchviz is a package to render the computational graph visually.
+#
+# We can generate an image for the computational graph in the example given below:
+#
+# import torch
+# from torch import nn
+# from torchviz import make_dot
+#
+# model = nn.Sequential(
+# nn.Linear(8, 16),
+# nn.ReLU(),
+# nn.Linear(16, 1)
+# )
+
+# x = torch.randn(1, 8, requires_grad=True)
+# y = model(x).mean()
+
+# log the internal operations using torchviz
+# import os
+# os.environ['TORCH_LOGS'] = "+autograd"
+
+# dot = make_dot(y, params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
+# dot.render('simple_graph', format='png')
+#
 # .. note::
 #    **DAGs are dynamic in PyTorch**
 #    An important thing to note is that the graph is recreated from scratch; after each

From 1532c0de875b79f541371438d8d1d08be18f87de Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Fri, 13 Jun 2025 13:40:05 +0000
Subject: [PATCH 3/8] update the tutorial.
---
 beginner_source/basics/autogradqs_tutorial.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 418de49cc6f..671ed67c817 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -32,7 +32,7 @@
 y = torch.zeros(3) # expected output
 w = torch.randn(5, 3, requires_grad=True)
 b = torch.randn(3, requires_grad=True)
-z = torch.matmul(x, w)+b
+z = torch.matmul(x, w) + b
 loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
@@ -133,7 +133,7 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 # not track gradients would be more efficient.
-# For additional reference, you can view the autograd mechanics 
+# For additional reference, you can view the autograd mechanics
 # documentation:https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation

 ######################################################################
@@ -171,7 +171,7 @@
 #
 # 2. Torchviz
 # Torchviz is a package to render the computational graph visually.
-# 
+#
 # We can generate an image for the computational graph in the example given below:
 #
 # import torch

From c11b361253044c1fbca78d4a5915a271c070b5e1 Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Thu, 26 Jun 2025 06:13:30 +0000
Subject: [PATCH 4/8] update the tutorial.

---
 beginner_source/basics/autogradqs_tutorial.py | 37 +++----------------
 1 file changed, 6 insertions(+), 31 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 671ed67c817..a3c89789086 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -133,8 +133,7 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 # not track gradients would be more efficient.
-# For additional reference, you can view the autograd mechanics
-# documentation:https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation
+# See this `note` for additional reference.

 ######################################################################
@@ -161,38 +160,14 @@
 # - accumulates them in the respective tensor’s ``.grad`` attribute
 # - using the chain rule, propagates all the way to the leaf tensors.
 #
-# We can also visualize the computational graph by the following 2 methods:
+# To get a sense of what this computational graph looks like we can use the following tools:
 #
-# 1. TORCH_LOGS="+autograd"
-# By setting the TORCH_LOGS="+autograd" environment variable, we can enable runtime autograd logs for debugging.
+# 1. torchviz is a package to visualize computational graphs
+# <https://github.com/szagoruyko/pytorchviz>
 #
-# We can perform the logging in the following manner:
-# TORCH_LOGS="+autograd" python test.py
+# 2. TORCH_LOGS="+autograd" enables logging for the backward pass.
+# <https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>
 #
-# 2. Torchviz
-# Torchviz is a package to render the computational graph visually.
-#
-# We can generate an image for the computational graph in the example given below:
-#
-# import torch
-# from torch import nn
-# from torchviz import make_dot
-#
-# model = nn.Sequential(
-# nn.Linear(8, 16),
-# nn.ReLU(),
-# nn.Linear(16, 1)
-# )
-
-# x = torch.randn(1, 8, requires_grad=True)
-# y = model(x).mean()
-
-# log the internal operations using torchviz
-# import os
-# os.environ['TORCH_LOGS'] = "+autograd"
-
-# dot = make_dot(y, params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
-# dot.render('simple_graph', format='png')
 #
 #
 # .. note::

From 86cf7022004d4f218bbfa9cfdba48769c384d871 Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Thu, 26 Jun 2025 06:14:29 +0000
Subject: [PATCH 5/8] update.

---
 beginner_source/basics/autogradqs_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index a3c89789086..107ff3cd2bc 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -32,7 +32,7 @@
 y = torch.zeros(3) # expected output
 w = torch.randn(5, 3, requires_grad=True)
 b = torch.randn(3, requires_grad=True)
-z = torch.matmul(x, w) + b
+z = torch.matmul(x, w)+b
 loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)

From a77d137f9b8f2adc45a573688f6be04427e03471 Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Mon, 30 Jun 2025 13:52:56 +0000
Subject: [PATCH 6/8] update link syntax.

---
 beginner_source/basics/autogradqs_tutorial.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 107ff3cd2bc..e7ea62b2def 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -163,10 +163,10 @@
 # To get a sense of what this computational graph looks like we can use the following tools:
 #
 # 1. torchviz is a package to visualize computational graphs
-# <https://github.com/szagoruyko/pytorchviz>
+# `<https://github.com/szagoruyko/pytorchviz>`
 #
 # 2. TORCH_LOGS="+autograd" enables logging for the backward pass.
-# <https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>
+# `<https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>`
 #
 #
 # .. note::

From 4b736a5837d0f437fae7123af52b70e7dff22ccf Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Mon, 30 Jun 2025 15:16:58 +0000
Subject: [PATCH 7/8] use the rst syntax.

---
 beginner_source/basics/autogradqs_tutorial.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index e7ea62b2def..7289d0084ad 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -133,7 +133,8 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 # not track gradients would be more efficient.
-# See this `note` for additional reference.
+# See this `note <https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation>`_
+# for additional reference.

 ######################################################################
@@ -163,10 +164,10 @@
 # To get a sense of what this computational graph looks like we can use the following tools:
 #
 # 1. torchviz is a package to visualize computational graphs
-# `<https://github.com/szagoruyko/pytorchviz>`
+# `pytorchviz <https://github.com/szagoruyko/pytorchviz>`_
 #
 # 2. TORCH_LOGS="+autograd" enables logging for the backward pass.
-# `<https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>`
+# `<https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>`_
 #
 #
 # .. note::
From b9576f7baccd59ecc7d191c0c942225f527bd993 Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Tue, 1 Jul 2025 15:05:29 +0000
Subject: [PATCH 8/8] fix: link syntax.

---
 beginner_source/basics/autogradqs_tutorial.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 7289d0084ad..2753103eaa8 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -133,7 +133,7 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 # not track gradients would be more efficient.
-# See this `note <https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation>`_ 
+# See this `note <https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation>`_
 # for additional reference.

@@ -163,11 +163,12 @@
 #
 # To get a sense of what this computational graph looks like we can use the following tools:
 #
-# 1. torchviz is a package to visualize computational graphs
-# `pytorchviz <https://github.com/szagoruyko/pytorchviz>`_
+# 1. torchviz is a package to visualize computational graphs.
+# See the repository here: `https://github.com/szagoruyko/pytorchviz <https://github.com/szagoruyko/pytorchviz>`_
+#
+# 2. Setting ``TORCH_LOGS="+autograd"`` enables logging for the backward pass. See details in this
+# discussion: `https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787 <https://dev-discuss.pytorch.org/t/highlighting-a-few-recent-autograd-features-h2-2023/1787>`_
 #
-# 2. TORCH_LOGS="+autograd" enables logging for the backward pass.
-# ``_
 #
 #
 # .. note::
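The "frozen parameters" bullet that these patches keep carrying as context is easy to try out concretely. Below is a minimal sketch; the layer sizes and the choice to freeze everything except the last layer are illustrative only, not part of the tutorial.

    import torch
    from torch import nn

    model = nn.Sequential(
        nn.Linear(10, 10),
        nn.ReLU(),
        nn.Linear(10, 2),
    )

    # Freeze all layers except the last one: frozen parameters stop
    # tracking gradients, so backward() never populates their .grad.
    for p in model[:-1].parameters():
        p.requires_grad_(False)

    out = model(torch.randn(4, 10)).sum()
    out.backward()
    print(model[0].weight.grad)   # None: frozen
    print(model[-1].weight.grad)  # populated: still trainable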
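For the torchviz link that patch 8 settles on, the inline example added in patch 2 and removed in patch 4 still works as a starting point. Here is a condensed, runnable version of it, assuming torchviz is installed (pip install torchviz) along with a system Graphviz; the model shape and the simple_graph file name come from that earlier revision.

    import torch
    from torch import nn
    from torchviz import make_dot

    # A small throwaway model; any module with parameters works.
    model = nn.Sequential(
        nn.Linear(8, 16),
        nn.ReLU(),
        nn.Linear(16, 1),
    )

    x = torch.randn(1, 8, requires_grad=True)
    y = model(x).mean()  # scalar output, so the graph ends in one node

    # make_dot walks backward from y.grad_fn and returns a graphviz
    # Digraph of the recorded operations and the named parameters.
    dot = make_dot(y, params=dict(model.named_parameters()))
    dot.render("simple_graph", format="png")  # writes simple_graph.png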
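The TORCH_LOGS route needs no extra dependencies. Any script that reaches .backward() will do; the test.py name below echoes the invocation shown in patch 2, and the exact log output depends on the PyTorch build.

    # test.py -- run as:  TORCH_LOGS="+autograd" python test.py
    import torch

    x = torch.randn(3, requires_grad=True)
    y = (x * x).sum()
    y.backward()   # with TORCH_LOGS="+autograd" set, autograd emits debug logs here
    print(x.grad)  # gradient of sum(x**2) is 2*x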