37 lines
1.1 KiB
Python
"""
|
|
Configuration module for torch.export.export.
|
|
|
|
This module contains various configuration flags and settings that control torch.export's
|
|
behavior, including:
|
|
- Runtime behavior flags
|
|
- Debugging and development options
|
|
"""
|
|
|
|
import sys
|
|
from typing import Any, TYPE_CHECKING
|
|
|
|
from torch.utils._config_module import install_config_module
|
|
|
|
|
|
# this flag controls whether we use new functional tracer. It
|
|
# should be True in the long term.
|
|
use_new_tracer_experimental = True
|
|
|
|
# this flag is used to control whether we want to instrument
|
|
# fake tensor creation to track potential leaks. It is off
|
|
# by default, but user can turn it on to debug leaks.
|
|
detect_non_strict_fake_tensor_leaks = False
|
|
|
|
# error on potentially pre-dispatch/non-strict tracing limitation
|
|
# this type of error usually happens when we encounter an op
|
|
# that we don't know how to proxy, resulting in untracked fake tensors
|
|
error_on_lifted_constant_tensors = True
|
|
|
|
if TYPE_CHECKING:
|
|
from torch.utils._config_typing import * # noqa: F401, F403
|
|
|
|
def _make_closure_patcher(**changes: Any) -> Any: ...
|
|
|
|
|
|
install_config_module(sys.modules[__name__])
|
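
As a rough usage sketch (not part of the file above): once install_config_module has processed this module, its flags read as ordinary attributes and can be temporarily overridden through the patch() context manager that the config machinery provides. The import path used below is an assumption; adjust it to wherever this file actually lives under torch/export.

# Hypothetical usage sketch; the module path below is an assumption.
import torch.export._config as export_config

# Flags defined above are readable as plain attributes.
print(export_config.use_new_tracer_experimental)          # True by default
print(export_config.detect_non_strict_fake_tensor_leaks)  # False by default

# install_config_module() also provides a patch() context manager that
# temporarily overrides flags and restores them on exit.
with export_config.patch(detect_non_strict_fake_tensor_leaks=True):
    pass  # run an export here while leak detection is enabled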