mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
[PyTorch] Support additional arguments in Python record function (#65736)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/65736 We ran into some limitations when extracting PyTorch operator parameters through hooks or the execution graph. Some of these limitations are not due to the operator not exposing them; rather, the inputs for these operators are already fused/processed in some cases (like embedding tables). We want to be able to attach some metadata to the user scope record functions, allowing the profilers to later extract this information. The record function C++ API already supports taking input and output information. The corresponding Python interface does not support them and only allows a string name as the record function parameter. This diff adds support for users to optionally add additional arguments to the record function in two ways. 1. To remain backward compatible with `record_function_op`, we have added an optional string arg to the interface: `with record_function(name, arg_str)`. 2. To support data dependency graphs, we also have the new `torch.autograd._record_function_with_args_enter` and `torch.autograd._record_function_with_args_exit` functions to provide an interface where we can give additional tensor arguments. For now we imagine this can be used for debugging or analysis purposes. In this form, we currently support some basic data types as inputs: scalars, string, list, and tensor. Example usage: ``` # record_function operator with a name and optionally, a string for arguments. 
with record_function("## TEST 1 ##", "[1, 2, 3]"): <actual module or operator> # more general form of record_function a = _record_function_with_args_enter("## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u) <actual module or operator> _record_function_with_args_exit(a) ``` Corresponding outputs in execution graph: ``` { "name": "## TEST 2 ##", "id": 7, "parent": 3, "fw_parent": 0, "scope": 5, "tid": 1, "fw_tid": 0, "inputs": [1,false,2.5,[6,6],"hello",6], "input_shapes": [[],[],[],[[3,4,5],[3,4,5]],[],[3,4,5]], "input_types": ["Int","Bool","Double","GenericList[Tensor(float),Tensor(float)]","String","Tensor(float)"], "outputs": [], "output_shapes": [], "output_types": [] }, { "name": "## TEST 1 ##", "id": 3, "parent": 2, "fw_parent": 0, "scope": 5, "tid": 1, "fw_tid": 0, "inputs": ["1, 2, 3"], "input_shapes": [[]], "input_types": ["String"], "outputs": [], "output_shapes": [], "output_types": [] }, ``` Test Plan: ``` => buck build caffe2/test:profiler --show-output => buck-out/gen/caffe2/test/profiler#binary.par test_profiler.TestRecordFunction test_record_function (test_profiler.TestRecordFunction) ... 
Log file: /tmp/libkineto_activities_1651304.json Net filter: Target net for iteration count: Net Iterations: 3 INFO:2021-09-27 01:10:15 1651304:1651304 Config.cpp:424] Trace start time: 2021-09-27 01:10:30 Trace duration: 500ms Warmup duration: 5s Net size threshold: 0 GPU op count threshold: 0 Max GPU buffer size: 128MB Enabled activities: cpu_op,user_annotation,external_correlation,cuda_runtime,cpu_instant_event Manifold bucket: gpu_traces Manifold object: tree/traces/clientAPI/0/1632730215/devvm2060.ftw0/libkineto_activities_1651304.json Trace compression enabled: 1 INFO:2021-09-27 01:10:15 1651304:1651304 ActivityProfiler.cpp:536] Tracing starting in 14s INFO:2021-09-27 01:10:15 1651304:1651304 ActivityProfiler.cpp:48] Target net for iterations not specified - picking first encountered that passes net filter INFO:2021-09-27 01:10:15 1651304:1651304 ActivityProfiler.cpp:57] Tracking net PyTorch Profiler for 3 iterations INFO:2021-09-27 01:10:15 1651304:1651304 ActivityProfiler.cpp:126] Processing 1 CPU buffers INFO:2021-09-27 01:10:15 1651304:1651304 ActivityProfiler.cpp:686] Recorded nets: INFO:2021-09-27 01:10:15 1651304:1651304 ActivityProfiler.cpp:689] PyTorch Profiler: 1 iterations ok ---------------------------------------------------------------------- Ran 1 test in 0.021s OK ``` Reviewed By: gdankel Differential Revision: D31165259 fbshipit-source-id: 15920aaef7138c666e5eca2a71c3bf33073eadc4
This commit is contained in:
parent
9918fd8305
commit
ecb7b38c00
|
|
@@ -517,6 +517,7 @@ void RecordFunction::before(const char* name, int64_t sequence_nr) {
|
||||||
if (!isActive()) {
|
if (!isActive()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
state_->op_input_size = state_->inputs_.size();
|
||||||
state_->name_ = StringView(name);
|
state_->name_ = StringView(name);
|
||||||
state_->sequence_nr_ = sequence_nr;
|
state_->sequence_nr_ = sequence_nr;
|
||||||
state_->thread_id_ = currentThreadId();
|
state_->thread_id_ = currentThreadId();
|
||||||
|
|
@@ -529,6 +530,7 @@ void RecordFunction::before(std::string name, int64_t sequence_nr) {
|
||||||
if (!isActive()) {
|
if (!isActive()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
state_->op_input_size = state_->inputs_.size();
|
||||||
state_->name_ = StringView(std::move(name));
|
state_->name_ = StringView(std::move(name));
|
||||||
state_->sequence_nr_ = sequence_nr;
|
state_->sequence_nr_ = sequence_nr;
|
||||||
state_->thread_id_ = currentThreadId();
|
state_->thread_id_ = currentThreadId();
|
||||||
|
|
|
||||||
|
|
@@ -13,6 +13,7 @@ from torch.testing._internal.common_cuda import TEST_MULTIGPU
|
||||||
from torch.testing._internal.common_utils import (
|
from torch.testing._internal.common_utils import (
|
||||||
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
|
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
|
||||||
TemporaryFileName, TemporaryDirectoryName)
|
TemporaryFileName, TemporaryDirectoryName)
|
||||||
|
from torch.autograd import (_record_function_with_args_enter, _record_function_with_args_exit)
|
||||||
from torch.autograd.profiler import profile as _profile
|
from torch.autograd.profiler import profile as _profile
|
||||||
from torch.profiler import (
|
from torch.profiler import (
|
||||||
kineto_available, profile, record_function, supported_activities,
|
kineto_available, profile, record_function, supported_activities,
|
||||||
|
|
@@ -57,6 +58,29 @@ class TestProfilerCUDA(TestCase):
|
||||||
self.assertTrue(not (is_increasing and max_diff > 100 * 1024),
|
self.assertTrue(not (is_increasing and max_diff > 100 * 1024),
|
||||||
msg='memory usage is increasing, {}'.format(str(last_rss)))
|
msg='memory usage is increasing, {}'.format(str(last_rss)))
|
||||||
|
|
||||||
|
class TestRecordFunction(TestCase):
|
||||||
|
def _record_function_with_param(self):
|
||||||
|
u = torch.randn(3, 4, 5, requires_grad=True)
|
||||||
|
with _profile(with_stack=True, use_kineto=kineto_available(), record_shapes=True) as prof:
|
||||||
|
with record_function("## TEST 1 ##", "1, 2, 3"):
|
||||||
|
rf_handle = _record_function_with_args_enter("## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u)
|
||||||
|
_record_function_with_args_exit(rf_handle)
|
||||||
|
return prof
|
||||||
|
|
||||||
|
def test_record_function(self):
|
||||||
|
prof_result = self._record_function_with_param()
|
||||||
|
found_test_1 = False
|
||||||
|
found_test_2 = False
|
||||||
|
for e in prof_result.function_events:
|
||||||
|
if "## TEST 1 ##" == e.name:
|
||||||
|
found_test_1 = True
|
||||||
|
self.assertTrue(e.input_shapes == [[]])
|
||||||
|
elif "## TEST 2 ##" == e.name:
|
||||||
|
found_test_2 = True
|
||||||
|
self.assertTrue(e.input_shapes == [[], [], [], [], [], [3, 4, 5]])
|
||||||
|
self.assertTrue(found_test_1)
|
||||||
|
self.assertTrue(found_test_2)
|
||||||
|
|
||||||
class TestProfiler(TestCase):
|
class TestProfiler(TestCase):
|
||||||
def test_source(self):
|
def test_source(self):
|
||||||
"""Checks that source code attribution works for eager, TS and autograd mode
|
"""Checks that source code attribution works for eager, TS and autograd mode
|
||||||
|
|
|
||||||
|
|
@@ -1,6 +1,8 @@
|
||||||
from typing import List, Set, Callable
|
from typing import List, Set, Callable, Any
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
|
import torch
|
||||||
|
|
||||||
# Defined in tools/autograd/init.cpp
|
# Defined in tools/autograd/init.cpp
|
||||||
|
|
||||||
class ProfilerState(Enum):
|
class ProfilerState(Enum):
|
||||||
|
|
@@ -86,6 +88,8 @@ def _disable_profiler() -> _ProfilerResult: ...
|
||||||
def _profiler_enabled() -> bool: ...
|
def _profiler_enabled() -> bool: ...
|
||||||
def _add_metadata_json(key: str, value: str) -> None: ...
|
def _add_metadata_json(key: str, value: str) -> None: ...
|
||||||
def kineto_available() -> bool: ...
|
def kineto_available() -> bool: ...
|
||||||
|
def _record_function_with_args_enter(name: str, args: List[Any]) -> torch.Tensor: ...
|
||||||
|
def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
|
||||||
def _supported_activities() -> Set[ProfilerActivity]: ...
|
def _supported_activities() -> Set[ProfilerActivity]: ...
|
||||||
def _enable_record_function(enable: bool) -> None: ...
|
def _enable_record_function(enable: bool) -> None: ...
|
||||||
def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
|
def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
|
||||||
|
|
|
||||||
|
|
@@ -305,6 +305,7 @@ if not torch._C._autograd_init():
|
||||||
from torch._C._autograd import (DeviceType, ProfilerActivity, ProfilerState, ProfilerConfig, ProfilerEvent,
|
from torch._C._autograd import (DeviceType, ProfilerActivity, ProfilerState, ProfilerConfig, ProfilerEvent,
|
||||||
_enable_profiler_legacy, _disable_profiler_legacy, _profiler_enabled,
|
_enable_profiler_legacy, _disable_profiler_legacy, _profiler_enabled,
|
||||||
_enable_record_function, _set_empty_test_observer, kineto_available,
|
_enable_record_function, _set_empty_test_observer, kineto_available,
|
||||||
|
_record_function_with_args_enter, _record_function_with_args_exit,
|
||||||
_supported_activities, _add_metadata_json, SavedTensor,
|
_supported_activities, _add_metadata_json, SavedTensor,
|
||||||
_register_saved_tensors_default_hooks, _reset_saved_tensors_default_hooks)
|
_register_saved_tensors_default_hooks, _reset_saved_tensors_default_hooks)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -423,8 +423,9 @@ class record_function(ContextDecorator):
|
||||||
CUDA time total: 0.000us
|
CUDA time total: 0.000us
|
||||||
|
|
||||||
"""
|
"""
|
||||||
def __init__(self, name: str):
|
def __init__(self, name: str, args: Optional[str] = None):
|
||||||
self.name: str = name
|
self.name: str = name
|
||||||
|
self.args: Optional[str] = args
|
||||||
# Whether or not we should run record function's end callbacks when exiting.
|
# Whether or not we should run record function's end callbacks when exiting.
|
||||||
self.run_callbacks_on_exit: bool = True
|
self.run_callbacks_on_exit: bool = True
|
||||||
# Stores underlying RecordFunction as a tensor. TODO: move to custom
|
# Stores underlying RecordFunction as a tensor. TODO: move to custom
|
||||||
|
|
@@ -432,7 +433,7 @@ class record_function(ContextDecorator):
|
||||||
self.handle: torch.Tensor = torch.zeros(1)
|
self.handle: torch.Tensor = torch.zeros(1)
|
||||||
|
|
||||||
def __enter__(self):
|
def __enter__(self):
|
||||||
self.handle = torch.ops.profiler._record_function_enter(self.name)
|
self.handle = torch.ops.profiler._record_function_enter(self.name, self.args)
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
|
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
|
||||||
|
|
|
||||||
|
|
@@ -6,7 +6,10 @@
|
||||||
#include <torch/csrc/utils/pybind.h>
|
#include <torch/csrc/utils/pybind.h>
|
||||||
#include <torch/csrc/autograd/autograd.h>
|
#include <torch/csrc/autograd/autograd.h>
|
||||||
#include <torch/csrc/autograd/grad_mode.h>
|
#include <torch/csrc/autograd/grad_mode.h>
|
||||||
|
#include <torch/csrc/jit/python/pybind_utils.h>
|
||||||
#include <ATen/autocast_mode.h>
|
#include <ATen/autocast_mode.h>
|
||||||
|
#include <ATen/cpp_custom_type_hack.h>
|
||||||
|
#include <ATen/record_function.h>
|
||||||
#include <torch/csrc/autograd/profiler.h>
|
#include <torch/csrc/autograd/profiler.h>
|
||||||
#include <torch/csrc/autograd/python_function.h>
|
#include <torch/csrc/autograd/python_function.h>
|
||||||
#include <torch/csrc/autograd/function.h>
|
#include <torch/csrc/autograd/function.h>
|
||||||
|
|
@@ -250,6 +253,35 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject *unused) {
|
||||||
#endif
|
#endif
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// NOTICE: These record functions are not torch operators and may not show up
|
||||||
|
// in TorchScript tracing, FX transforms, or operator serialization. For these
|
||||||
|
// use cases, please use `torch.profiler.record_function`.
|
||||||
|
// Creates a new profiling scope using RecordFunction and invokes its starting
|
||||||
|
// callbacks.
|
||||||
|
m.def("_record_function_with_args_enter", [](const std::string& name, py::args args) {
|
||||||
|
auto rec = std::make_unique<at::RecordFunction>(at::RecordScope::USER_SCOPE);
|
||||||
|
if (rec->isActive()) {
|
||||||
|
if (rec->needsInputs()) {
|
||||||
|
auto iv_inputs = std::vector<c10::IValue>();
|
||||||
|
for (const auto& arg : args) {
|
||||||
|
iv_inputs.push_back(torch::jit::toTypeInferredIValue(arg));
|
||||||
|
}
|
||||||
|
rec->before(name, iv_inputs);
|
||||||
|
} else {
|
||||||
|
rec->before(name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return at::cpp_custom_type_hack::create(std::move(rec), at::TensorOptions());
|
||||||
|
});
|
||||||
|
|
||||||
|
// Ends the profiling scope created with record_function_with_param_enter.
|
||||||
|
m.def("_record_function_with_args_exit", [](const at::Tensor& handle) {
|
||||||
|
// We don't actually need to do anything with handle just need to persist the
|
||||||
|
// lifetime until now.
|
||||||
|
auto& rec = at::cpp_custom_type_hack::cast<at::RecordFunction>(handle);
|
||||||
|
rec.end();
|
||||||
|
});
|
||||||
|
|
||||||
m.def("_supported_activities", []() {
|
m.def("_supported_activities", []() {
|
||||||
std::set<ActivityType> activities {ActivityType::CPU};
|
std::set<ActivityType> activities {ActivityType::CPU};
|
||||||
#if defined(USE_KINETO) && !defined(LIBKINETO_NOCUPTI)
|
#if defined(USE_KINETO) && !defined(LIBKINETO_NOCUPTI)
|
||||||
|
|
|
||||||
|
|
@@ -16,9 +16,17 @@ namespace profiler {
|
||||||
|
|
||||||
// Creates a new profiling scope using RecordFunction and invokes its starting
|
// Creates a new profiling scope using RecordFunction and invokes its starting
|
||||||
// callbacks.
|
// callbacks.
|
||||||
at::Tensor record_function_enter(const std::string& name) {
|
at::Tensor record_function_enter(
|
||||||
|
const std::string& name,
|
||||||
|
const c10::optional<std::string>& args) {
|
||||||
auto rec = std::make_unique<at::RecordFunction>(at::RecordScope::USER_SCOPE);
|
auto rec = std::make_unique<at::RecordFunction>(at::RecordScope::USER_SCOPE);
|
||||||
rec->before(name);
|
if (rec->isActive()) {
|
||||||
|
if (rec->needsInputs() && args.has_value()) {
|
||||||
|
rec->before(name, std::vector<c10::IValue>{c10::IValue{args.value()}});
|
||||||
|
} else {
|
||||||
|
rec->before(name);
|
||||||
|
}
|
||||||
|
}
|
||||||
return at::cpp_custom_type_hack::create(std::move(rec), at::TensorOptions());
|
return at::cpp_custom_type_hack::create(std::move(rec), at::TensorOptions());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -67,7 +75,7 @@ c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut(
|
||||||
|
|
||||||
// Internal only, do not use directly, use Python's record_function()
|
// Internal only, do not use directly, use Python's record_function()
|
||||||
TORCH_LIBRARY_FRAGMENT(profiler, m) {
|
TORCH_LIBRARY_FRAGMENT(profiler, m) {
|
||||||
m.def("_record_function_enter", &record_function_enter);
|
m.def("_record_function_enter(str name, str? args=None) -> Tensor", &record_function_enter);
|
||||||
m.def("_record_function_exit", &record_function_exit);
|
m.def("_record_function_exit", &record_function_exit);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -1,12 +1,13 @@
|
||||||
#pragma once
|
#pragma once
|
||||||
#include <ATen/record_function.h>
|
#include <ATen/record_function.h>
|
||||||
|
#include <c10/util/Optional.h>
|
||||||
|
|
||||||
namespace torch {
|
namespace torch {
|
||||||
namespace autograd {
|
namespace autograd {
|
||||||
namespace profiler {
|
namespace profiler {
|
||||||
// Creates a new profiling scope using RecordFunction and invokes its starting
|
// Creates a new profiling scope using RecordFunction and invokes its starting
|
||||||
// callbacks.
|
// callbacks.
|
||||||
TORCH_API at::Tensor record_function_enter(const std::string& name);
|
TORCH_API at::Tensor record_function_enter(const std::string& name, const c10::optional<std::string>& args = c10::nullopt);
|
||||||
|
|
||||||
// Schedules RecordFunction's end callbacks to be run on completion of a future.
|
// Schedules RecordFunction's end callbacks to be run on completion of a future.
|
||||||
TORCH_API c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut(
|
TORCH_API c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut(
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue
Block a user