mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 00:21:07 +01:00
This diff moves export run_decompositions to use aot_export_joint_with_descriptors instead of aot_export_module. In doing so, I ran into 2 main bugs: 1) aot_export_joint_with_descriptors doesn't correctly pass in the record_nn_module_stack flag that is needed to populate nn_module_stack by switching the internal tracer. 2) When creating symints with negative inputs, we need to pass in positive=False. This didn't matter before because aot_autograd directly returned integer inputs instead of creating symints. Pull Request resolved: https://github.com/pytorch/pytorch/pull/165931 Approved by: https://github.com/zhxchen17
43 lines
1.3 KiB
Python
43 lines
1.3 KiB
Python
"""
|
|
Configuration module for torch.export.export.
|
|
|
|
This module contains various configuration flags and settings that control torch.export's
|
|
behavior, including:
|
|
- Runtime behavior flags
|
|
- Debugging and development options
|
|
"""
|
|
|
|
import sys
|
|
from typing import Any, TYPE_CHECKING
|
|
|
|
from torch._environment import is_fbcode
|
|
from torch.utils._config_module import install_config_module
|
|
|
|
|
|
# this flag controls whether we use new functional tracer. It
|
|
# should be True in the long term.
|
|
use_new_tracer_experimental = True
|
|
|
|
# this flag is used to control whether we want to instrument
|
|
# fake tensor creation to track potential leaks. It is off
|
|
# by default, but user can turn it on to debug leaks.
|
|
detect_non_strict_fake_tensor_leaks = False
|
|
|
|
# error on potentially pre-dispatch/non-strict tracing limitation
|
|
# this type of error usually happens when we encounter an op
|
|
# that we don't know how to proxy, resulting in untracked fake tensors
|
|
error_on_lifted_constant_tensors = True
|
|
|
|
# enable auto_functionalized_v2 in export
|
|
# We turn this off in fbcode due to downstream users not
|
|
# being ready to handle auto_functionalized_v2.
|
|
enable_auto_functionalized_v2_for_export = not is_fbcode()
|
|
|
|
if TYPE_CHECKING:
|
|
from torch.utils._config_typing import * # noqa: F401, F403
|
|
|
|
def _make_closure_patcher(**changes: Any) -> Any: ...
|
|
|
|
|
|
install_config_module(sys.modules[__name__])
|