pytorch/torch/_functorch/config.py
Michael Lazos 730e44bbc7 Add logging for aot autograd and unified debug flag (#88987)
- Adds `log_level` to aot autograd's config
- Outputs the log to `<graph_name>_<log_level>.log` in the aot_torchinductor subfolder of the debug directory
- Modifies the Inductor debug context to name its folder after the graph name instead of the OS pid
- Adds a `TORCH_COMPILE_DEBUG` flag to enable it, as well as separate dynamo and inductor logs (see the sketch below)
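
A minimal sketch of enabling the unified flag (the function and inputs are placeholders, and `torch.compile` stands in for whatever entry point triggers compilation in your program):

```python
import os

# The flag is read from the environment, so set it before compilation
# runs (e.g. in the shell that launches the program).
os.environ["TORCH_COMPILE_DEBUG"] = "1"

import torch

def f(x):
    return x.sin() + x.cos()

# Debug artifacts, including the aot autograd logs described above,
# land in the debug directory.
compiled = torch.compile(f)
compiled(torch.randn(4))
```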

Pull Request resolved: https://github.com/pytorch/pytorch/pull/88987
Approved by: https://github.com/Chillee
2022-12-09 17:28:10 +00:00

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Global flags for aot autograd
"""
import logging
import os

# Trace through functionalization, removing mutation from the captured
# graphs.
use_functionalize = True

# Trace with fake tensors, avoiding real compute and memory.
# TODO: benchmark before changing the default.
use_fake_tensor = False
# Enables optional asserts in hot-path code to check for errors. If
# you are seeing unexplained accuracy problems, try turning this on.
# For now it is on by default, to make bugs easier to identify.
debug_assert = True
# Cross-references real tracing against fake tensor propagation to help
# localize bugs.
debug_fake_cross_ref = os.environ.get("AOT_FAKE_CROSSREF", False)
# Prints debug information from the partitioner.
debug_partitioner = os.environ.get("AOT_PARTITIONER_DEBUG", False)
# Prints the forward and backward FX graphs.
debug_graphs = os.environ.get("AOT_FX_GRAPHS", False)
# Prints the joint graph as traced, before partitioning.
debug_joint = os.environ.get("AOT_FX_GRAPHS_JOINT", False)
# Enables dynamic shape support.
use_dynamic_shapes = os.getenv("AOT_DYNAMIC_SHAPES", False)
# Treat weight (parameter) shapes as static, even when dynamic shapes
# are enabled.
static_weight_shapes = True
# Log at DEBUG when any of the graph-debug flags above is set,
# otherwise at INFO.
log_level = (
    logging.DEBUG if debug_partitioner or debug_graphs or debug_joint else logging.INFO
)
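
# Usage note (illustrative, not executed): setting any of the AOT_* env
# vars above to a non-empty string makes the corresponding flag truthy,
# because os.environ.get returns the raw string whenever the variable
# is set; that in turn flips log_level to DEBUG. A hypothetical shell
# session:
#
#   AOT_FX_GRAPHS=1 python -c \
#       "import logging; from torch._functorch import config; \
#       assert config.log_level == logging.DEBUG"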