- We now record on CacheEntry the compile id that populated it, so we can now say why a specific frame was rejected.
- Add a structured log for recompiles under the artifact name "recompile_reasons". As it stands, it's not terribly structured, but this was the easiest thing I could do to start.
- Slightly reformat multi-reason printing; since we only report one guard failure, it seems better to have it as a single line.

Example output:

```
V0703 10:34:13.273000 140345997743104 torch/_dynamo/guards.py:2590] [0/1] [__recompiles] Recompiling function f in /data/users/ezyang/a/pytorch/b.py:3
V0703 10:34:13.273000 140345997743104 torch/_dynamo/guards.py:2590] [0/1] [__recompiles] triggered by the following guard failure(s):
V0703 10:34:13.273000 140345997743104 torch/_dynamo/guards.py:2590] [0/1] [__recompiles] - 0/0: tensor 'L['x']' size mismatch at index 0. expected 4, actual 5
```

Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130043
Approved by: https://github.com/anijain2305
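For illustration only (not part of this PR), a minimal sketch of how one might trigger the log above. The function `f` and its inputs are assumptions modeled on `b.py:3` from the example; the text log is enabled with `torch._logging.set_logs` (equivalently, running with `TORCH_LOGS="recompiles"`):

```python
# Minimal repro sketch (assumed, mirroring the example output above).
import torch

# Enable the recompiles text log; TORCH_LOGS="recompiles" also works.
torch._logging.set_logs(recompiles=True)

@torch.compile
def f(x):
    return x + 1

f(torch.randn(4))  # first compile (0/0); guards specialize on size 4
f(torch.randn(5))  # size 4 -> 5 guard failure emits a [__recompiles] entry
```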
75 lines
2.3 KiB
C++
#include <torch/csrc/dynamo/init.h>

#include <pybind11/stl_bind.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/dynamo/cache_entry.h>
#include <torch/csrc/dynamo/cpython_defs.h>
#include <torch/csrc/dynamo/eval_frame.h>
#include <torch/csrc/dynamo/extra_state.h>
#include <torch/csrc/dynamo/guards.h>
#include <torch/csrc/dynamo/python_compiled_autograd.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_compat.h>

// Module object for torch._C._dynamo; submodules are attached to it in
// initDynamoBindings below.
static struct PyModuleDef _module =
    {PyModuleDef_HEAD_INIT, "torch._C._dynamo", "", -1, nullptr};

// Treat std::vector<uint8_t> as an opaque type so py::bind_vector can
// expose it below (as VectorUInt8) instead of converting it to a list.
PYBIND11_MAKE_OPAQUE(std::vector<uint8_t>);

namespace torch::dynamo {

#if IS_PYTHON_3_11_PLUS

// Python 3.11+ bytecode embeds inline caches after certain instructions;
// this copies CPython's per-opcode cache-size table for use from Python.
std::vector<uint8_t> _PyOpcode_Caches_vec(
    THP_PyOpcode_Caches,
    THP_PyOpcode_Caches + THP_PyOpcode_Caches_size);

#else

std::vector<uint8_t> _PyOpcode_Caches_vec;

#endif

using torch::dynamo::autograd::torch_c_dynamo_compiled_autograd_init;

void initDynamoBindings(PyObject* torch) {
  // Create torch._C._dynamo and hang it off the parent torch._C module.
  PyObject* dynamo = PyModule_Create(&_module);
  if (dynamo == nullptr || PyModule_AddObject(torch, "_dynamo", dynamo) != 0) {
    throw python_error();
  }

  // Initialize the three C submodules: eval_frame, guards, and
  // compiled_autograd.
  PyObject* eval_frame = torch_c_dynamo_eval_frame_init();
  if (eval_frame == nullptr ||
      PyModule_AddObject(dynamo, "eval_frame", eval_frame) != 0) {
    throw python_error();
  }

  PyObject* guards = torch_c_dynamo_guards_init();
  if (guards == nullptr || PyModule_AddObject(dynamo, "guards", guards) != 0) {
    throw python_error();
  }

  PyObject* compiled_autograd = torch_c_dynamo_compiled_autograd_init();
  if (compiled_autograd == nullptr ||
      PyModule_AddObject(dynamo, "compiled_autograd", compiled_autograd) != 0) {
    throw python_error();
  }

  auto m = py::handle(eval_frame).cast<py::module>();

  // A _CacheEntry pairs a guard check function with the compiled code it
  // guards; compile_id records which compile populated the entry, and
  // next walks the per-code-object linked list of entries.
  py::class_<CacheEntry>(m, "_CacheEntry")
      .def_readonly("check_fn", &CacheEntry::check_fn)
      .def_readonly("code", &CacheEntry::code)
      .def_readonly("compile_id", &CacheEntry::compile_id)
      .def_property_readonly("next", &CacheEntry::next);

  // _ExtraState is the dynamo state attached to a code object.
  py::class_<ExtraState>(m, "_ExtraState")
      .def("invalidate", &ExtraState::invalidate);

  m.def("_debug_get_cache_entry_list", &_debug_get_cache_entry_list);
  py::bind_vector<std::vector<uint8_t>>(m, "VectorUInt8");
  m.attr("py_opcode_caches") = _PyOpcode_Caches_vec;
}

} // namespace torch::dynamo
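For reference, a hedged usage sketch of the bindings above (not from this file or the PR): it compiles a function twice and then walks the cache entries through the `_debug_get_cache_entry_list` binding, reading the new `compile_id` field. The function `f` and its inputs are illustrative assumptions, and the exact contents of `compile_id` are an internal detail that may vary across PyTorch versions.

```python
# Hedged usage sketch: inspect dynamo's per-code-object cache entries via
# the private torch._C._dynamo.eval_frame bindings defined in this file.
import torch
from torch._C._dynamo import eval_frame as _eval_frame

def f(x):
    return x * 2

opt_f = torch.compile(f)
opt_f(torch.randn(4))
opt_f(torch.randn(5))  # size change -> guard failure -> second cache entry

# Cache entries hang off the original function's code object.
for entry in _eval_frame._debug_get_cache_entry_list(f.__code__):
    print(entry.compile_id, entry.code)
```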