From eb49369f045306208c557369240967eecf25aa14 Mon Sep 17 00:00:00 2001
From: "Bintang Alam Semesta W.A.M" <23573683+bintang-aswam@users.noreply.github.com>
Date: Tue, 17 Jun 2025 20:56:40 +0700
Subject: [PATCH] Manually zero the gradients after updating weights by using
 machine epsilon for standard float (64-bit)

Manually zero the gradients after updating weights by using machine epsilon
for standard float (64-bit).
---
 .../examples_autograd/polynomial_autograd.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/beginner_source/examples_autograd/polynomial_autograd.py b/beginner_source/examples_autograd/polynomial_autograd.py
index 525d0c33ce9..df81a7b650c 100755
--- a/beginner_source/examples_autograd/polynomial_autograd.py
+++ b/beginner_source/examples_autograd/polynomial_autograd.py
@@ -67,9 +67,12 @@
         d -= learning_rate * d.grad
 
         # Manually zero the gradients after updating weights
-        a.grad = None
-        b.grad = None
-        c.grad = None
-        d.grad = None
+        # by using machine epsilon for standard float (64-bit)
+        import sys
+
+        a.grad = loss*sys.float_info.epsilon
+        b.grad = loss*sys.float_info.epsilon
+        c.grad = loss*sys.float_info.epsilon
+        d.grad = loss*sys.float_info.epsilon
 
 print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
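
To try the change without applying the patch, below is a minimal, self-contained
sketch of the tutorial's training loop with the modified gradient reset. The
surrounding setup (data, weights, learning rate, iteration count) is paraphrased
from polynomial_autograd.py and is an approximation, not the exact file contents:

import math
import sys

import torch

# Fit y = sin(x) with a third-order polynomial y = a + b x + c x^2 + d x^3
x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)

# Four scalar weights, tracked by autograd
a = torch.randn((), requires_grad=True)
b = torch.randn((), requires_grad=True)
c = torch.randn((), requires_grad=True)
d = torch.randn((), requires_grad=True)

learning_rate = 1e-6
for t in range(2000):
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    loss = (y_pred - y).pow(2).sum()
    loss.backward()

    with torch.no_grad():
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad
        c -= learning_rate * c.grad
        d -= learning_rate * d.grad

        # The patch's replacement for `a.grad = None`: overwrite each gradient
        # with loss * machine epsilon (about 2.22e-16 for a 64-bit float),
        # leaving a tiny residual instead of clearing the gradient tensor.
        a.grad = loss * sys.float_info.epsilon
        b.grad = loss * sys.float_info.epsilon
        c.grad = loss * sys.float_info.epsilon
        d.grad = loss * sys.float_info.epsilon

print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')

Two behavioral notes on the change itself. First, unlike setting .grad to None,
this leaves a small nonzero tensor in .grad, so the next loss.backward()
accumulates onto a residual on the order of loss * 2.2e-16 rather than into a
freshly allocated gradient; the residual is negligible relative to the new
gradient but not exactly zero. Second, sys.float_info.epsilon is the epsilon of
Python's 64-bit float, while the tutorial's tensors default to torch.float32,
whose own epsilon is about 1.19e-7. The sketch also hoists "import sys" to the
top of the file; the patch places it inside the loop, which Python's module
cache makes harmless, though imports are conventionally done once at the top.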