[torch] Fix unsafe concurrent access to autocast_enabled (#148281)
Summary: Making autocast_enabled atomic, as it can be accessed from multiple threads.

Differential Revision: D70456813

Pull Request resolved: https://github.com/pytorch/pytorch/pull/148281
Approved by: https://github.com/davidberard98
This commit is contained in:
parent a2bba53f87
commit d90d83c484
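For context, the bug being fixed is a read-then-write race on a plain global flag: the old setAutocastMode read autocast_enabled and then assigned to it as two separate steps, and any concurrent reader or writer made that a data race under the C++ memory model. The sketch below is a minimal standalone illustration of the pattern the commit applies (plain bool replaced by std::atomic<bool>, the get-and-set collapsed into a single exchange); it mirrors the function names from the diff but is not the actual PyTorch file, and the load() body of autocastEnabled is an assumption, since the diff cuts off before that function's body.

// Minimal sketch of the fix pattern, not the actual PyTorch source.
#include <atomic>

namespace {

// Was a plain `bool`; any concurrent read/write of it was a data race.
std::atomic<bool> autocast_enabled = true;

} // namespace

// Installs `value` and returns the previous setting in one atomic step,
// so no other thread can interleave between the read and the write.
bool setAutocastMode(bool value) {
  return autocast_enabled.exchange(value);
}

// Assumed reader body (the diff ends before it); a plain load suffices,
// and std::atomic defaults to sequentially consistent ordering.
bool autocastEnabled() {
  return autocast_enabled.load();
}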
@@ -7,6 +7,7 @@
 #include <torch/csrc/jit/ir/ir.h>
 #include <torch/csrc/jit/jit_log.h>
 #include <torch/csrc/jit/passes/quantization/helper.h>
 
+#include <atomic>
 #include <optional>
 #include <stack>
@@ -17,7 +18,7 @@ namespace torch::jit {
 
 namespace {
 
-bool autocast_enabled = true;
+std::atomic<bool> autocast_enabled = true;
 
 struct AutocastContext {
   bool gpu_enabled = false;
@@ -509,9 +510,7 @@ void handleBlock(Block* block, AutocastContext initial_state) {
 } // namespace
 
 bool setAutocastMode(bool value) {
-  auto old_value = autocast_enabled;
-  autocast_enabled = value;
-  return old_value;
+  return autocast_enabled.exchange(value);
 }
 
 bool autocastEnabled() {
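As a rough way to see why the exchange matters, here is a hypothetical standalone stress check (not part of this PR or the PyTorch test suite; flag and set_flag are stand-ins for autocast_enabled and setAutocastMode): two threads toggle the flag concurrently, which with the old plain-bool read-then-write would be a data race that ThreadSanitizer reports, while the atomic version is well defined.

// Hypothetical stress sketch of the atomic get-and-set pattern.
#include <atomic>
#include <cstdio>
#include <thread>

namespace {
std::atomic<bool> flag = true; // stand-in for autocast_enabled
} // namespace

// Same shape as setAutocastMode after the fix: swap in the new value
// and get the old one back in a single atomic operation.
bool set_flag(bool value) {
  return flag.exchange(value);
}

int main() {
  // Two threads repeatedly toggle the flag. With a plain bool and a
  // separate read + write this would be undefined behavior; with
  // std::atomic every toggle is well defined.
  auto toggler = [] {
    for (int i = 0; i < 100000; ++i) {
      set_flag(i % 2 == 0);
    }
  };
  std::thread t1(toggler);
  std::thread t2(toggler);
  t1.join();
  t2.join();
  std::printf("final value: %d\n", static_cast<int>(flag.load()));
  return 0;
}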