We were running into a few issues with running multithreaded backwards in aot_autograd, such as https://github.com/pytorch/pytorch/issues/86136, and `FakeTensorMode` getting into a weird state as a result of not executing functions completely sequentially. Multithreaded backwards is lost in translation when we trace out the backwards anyway, and it adds a lot of additional complexity.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/86245
Approved by: https://github.com/albanD, https://github.com/yf225
24 lines
548 B
C++
#include <c10/core/AutogradState.h>

namespace c10 {

namespace {
// By default, grad mode and multithreading are enabled; inference mode is
// disabled.
thread_local AutogradState autograd_state_tls = AutogradState(
    /* grad_mode */ true,
    /* inference_mode */ false,
    /* fw_grad_mode */ true,
    /* multithreading_enabled */ true);
} // namespace

AutogradState& AutogradState::get_tls_state() {
  return autograd_state_tls;
}

void AutogradState::set_tls_state(AutogradState state) {
  autograd_state_tls = state;
}

} // namespace c10
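As a rough illustration of how this thread-local state is meant to be used (not code from this PR), a caller could disable backward multithreading for the current thread by constructing an AutogradState with multithreading_enabled set to false and installing it via set_tls_state. The helper name disable_backward_multithreading_for_this_thread is hypothetical, and the sketch assumes get_tls_state/set_tls_state are declared static in AutogradState.h, matching how they are defined above.

#include <c10/core/AutogradState.h>

// Hypothetical helper (illustration only): install a thread-local autograd
// state that keeps the defaults from this file but turns off backward
// multithreading for the current thread.
void disable_backward_multithreading_for_this_thread() {
  c10::AutogradState state(
      /* grad_mode */ true,
      /* inference_mode */ false,
      /* fw_grad_mode */ true,
      /* multithreading_enabled */ false);
  c10::AutogradState::set_tls_state(state);
}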