From d90d83c484a44fe762aee29e48b7d07af6df5ab7 Mon Sep 17 00:00:00 2001
From: Ivan Grigorev
Date: Tue, 25 Mar 2025 14:46:09 +0000
Subject: [PATCH] [torch] Fix unsafe concurrent access to autocast_enabled
 (#148281)

Summary: Making autocast_enabled atomic, as it can be accessed from
multiple threads.

Differential Revision: D70456813

Pull Request resolved: https://github.com/pytorch/pytorch/pull/148281
Approved by: https://github.com/davidberard98
---
 torch/csrc/jit/passes/autocast.cpp | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/torch/csrc/jit/passes/autocast.cpp b/torch/csrc/jit/passes/autocast.cpp
index 1d5cb636e45..4699cceec5b 100644
--- a/torch/csrc/jit/passes/autocast.cpp
+++ b/torch/csrc/jit/passes/autocast.cpp
@@ -7,6 +7,7 @@
 #include <torch/csrc/jit/jit_log.h>
 #include <torch/csrc/jit/passes/quantization/helper.h>
 
+#include <atomic>
 #include <stack>
 #include <unordered_set>
 #include <vector>
@@ -17,7 +18,7 @@ namespace torch::jit {
 
 namespace {
 
-bool autocast_enabled = true;
+std::atomic<bool> autocast_enabled = true;
 
 struct AutocastContext {
   bool gpu_enabled = false;
@@ -509,9 +510,7 @@ void handleBlock(Block* block, AutocastContext initial_state) {
 } // namespace
 
 bool setAutocastMode(bool value) {
-  auto old_value = autocast_enabled;
-  autocast_enabled = value;
-  return old_value;
+  return autocast_enabled.exchange(value);
 }
 
 bool autocastEnabled() {
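
Note (not part of the original patch): the sketch below illustrates the race
the patch fixes. With a plain bool, the old setAutocastMode performed a read
of autocast_enabled followed by a separate write; two threads interleaving
those steps can lose an update, and the unsynchronized accesses are a data
race (undefined behavior) under the C++ memory model. std::atomic<bool>
collapses the read-modify-write into one indivisible exchange (sequentially
consistent by default). The main() driver and thread count here are
illustrative assumptions, not code from PyTorch.

#include <atomic>
#include <thread>

// Shared flag, as in the patched file.
std::atomic<bool> autocast_enabled{true};

// Mirrors the fixed setAutocastMode: swap in the new value and return the
// previous one in a single atomic step.
bool setAutocastMode(bool value) {
  return autocast_enabled.exchange(value);
}

int main() {
  // Hypothetical concurrent callers, standing in for the multiple threads
  // that can reach this code path in PyTorch.
  std::thread t1([] { setAutocastMode(false); });
  std::thread t2([] { setAutocastMode(true); });
  t1.join();
  t2.join();
  // Each call's return value is the exact previous state; no update is lost.
  return autocast_enabled.load() ? 0 : 1;
}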