Revert "fix compiled_autograd deadlock throw (#135795)"

This reverts commit 00dc7d4356.

Reverted https://github.com/pytorch/pytorch/pull/135795 on behalf of https://github.com/facebook-github-bot due to Diff reverted internally ([comment](https://github.com/pytorch/pytorch/pull/135795#issuecomment-2354233619))
PyTorch MergeBot 2024-09-16 23:59:56 +00:00
parent 071da87cd7
commit 37a08b33bb
3 changed files with 7 additions and 9 deletions


@@ -2686,7 +2686,10 @@ TORCH_LIBRARY(test_cudagraphs_cpu_scalar_used_in_cpp_custom_op, m) {
         inp = torch.rand(10, 10, requires_grad=True)
         out = torch.utils.checkpoint.checkpoint(fn, inp, use_reentrant=True)
-        with torch._dynamo.compiled_autograd.enable(torch.compile):
+        with self.assertRaisesRegex(
+            RuntimeError,
+            r"\(e.g. reentrant checkpointing\), this is not supported yet\.",
+        ), torch._dynamo.compiled_autograd.enable(torch.compile):
             out.backward()
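
The restored assertion expects reentrant checkpointing under compiled autograd to raise rather than hang. A minimal, self-contained sketch of the pattern this test fragment exercises is below; `fn` is not shown in the hunk, so its body here is an illustrative assumption only.

import torch
import torch._dynamo.compiled_autograd

def fn(x):
    # Hypothetical stand-in for the checkpointed function; returns a scalar
    # so .backward() needs no gradient argument.
    return x.sin().cos().sum()

inp = torch.rand(10, 10, requires_grad=True)
# use_reentrant=True re-enters the autograd engine during backward, which is
# the case compiled autograd refuses once this revert restores the throw.
out = torch.utils.checkpoint.checkpoint(fn, inp, use_reentrant=True)
with torch._dynamo.compiled_autograd.enable(torch.compile):
    out.backward()  # expected to raise RuntimeError per the restored assertRaisesRegex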


@@ -525,10 +525,6 @@ def disable():
         torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
 
 
-def maybe_disable_compiled_autograd():
-    return disable() if in_compiled_autograd_region else contextlib.nullcontext()
-
-
 # return to starting state of a new process
 def reset() -> None:
     compiled_autograd_enable = False
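
The helper deleted here picks between `disable()` and a no-op context manager. A standalone sketch of that idiom follows; the module-level flag and `disable()` below are simplified stand-ins, not the real implementations.

import contextlib

in_compiled_autograd_region = False  # stand-in for the real module-level flag


@contextlib.contextmanager
def disable():
    # Stand-in for the real disable(), which swaps the autograd compiler out and restores it.
    print("compiled autograd off")
    try:
        yield
    finally:
        print("compiled autograd restored")


def maybe_disable_compiled_autograd():
    # Same shape as the removed helper: only disable when a nested backward is running.
    return disable() if in_compiled_autograd_region else contextlib.nullcontext()


with maybe_disable_compiled_autograd():
    pass  # a reentrant backward call would run here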


@@ -822,10 +822,9 @@ def _engine_run_backward(
     if attach_logging_hooks:
         unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs)
     try:
-        with torch._dynamo.compiled_autograd.maybe_disable_compiled_autograd():
-            return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
-                t_outputs, *args, **kwargs
-            )  # Calls into the C++ engine to run the backward pass
+        return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
+            t_outputs, *args, **kwargs
+        )  # Calls into the C++ engine to run the backward pass
     finally:
         if attach_logging_hooks:
             unregister_hooks()  # type: ignore[possibly-undefined]
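
After the revert, the engine call runs directly inside the try/finally, with no maybe_disable_compiled_autograd() wrapper around it. Any ordinary eager backward exercises this path; a minimal example:

import torch

x = torch.randn(3, requires_grad=True)
y = (x * x).sum()
y.backward()   # reaches _engine_run_backward and the C++ engine's run_backward
print(x.grad)  # equals 2 * x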