[Profiler][Minor] Separate standalone profilers from the main PyTorch profiler. (#85511)

A number of instrumentation utilities have been added to the profiler toolkit. They are generally small and self-contained, often wrapping vendor APIs (e.g. NVTX, ITT).

They don't interact with the much more expansive machinery of the PyTorch profiler beyond registration/unregistration, some shared utilities, and reuse of the profiler base class. Just as with the stubs, it makes sense to group them in a dedicated `standalone/` subfolder.
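For a sense of how thin these wrappers are: the standalone observers are driven from small user-facing entry points rather than through the Kineto collection pipeline. A minimal sketch, assuming a CUDA build and using the existing `torch.autograd.profiler.emit_nvtx` context manager (the ITT observer has the analogous `emit_itt`); the model and input below are placeholders:

```python
import torch
from torch.autograd import profiler

model = torch.nn.Linear(8, 8).cuda()
x = torch.randn(4, 8, device="cuda")

# emit_nvtx registers the standalone NVTX observer for the duration of the
# block; each ATen op is emitted as an NVTX range visible in Nsight Systems.
with profiler.emit_nvtx():
    model(x)
```

Nothing in that path touches the event-collection machinery, which is what makes a dedicated `standalone/` grouping natural.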

Differential Revision: [D39108649](https://our.internmc.facebook.com/intern/diff/D39108649/)

**NOTE FOR REVIEWERS**: This PR has internal Meta-specific changes or comments; please review them on [Phabricator](https://our.internmc.facebook.com/intern/diff/D39108649/)!
Pull Request resolved: https://github.com/pytorch/pytorch/pull/85511
Approved by: https://github.com/albanD
Taylor Robie, 2022-10-13 07:49:00 -07:00, committed by PyTorch MergeBot
parent b8f14b7877
commit 35fb007749
13 changed files with 33 additions and 32 deletions


@@ -134,13 +134,13 @@ libtorch_profiler_sources = [
     "torch/csrc/autograd/profiler_legacy.cpp",
     "torch/csrc/autograd/profiler_kineto.cpp",
     "torch/csrc/profiler/collection.cpp",
-    "torch/csrc/profiler/execution_graph_observer.cpp",
     "torch/csrc/profiler/kineto_shim.cpp",
-    "torch/csrc/profiler/nvtx_observer.cpp",
     "torch/csrc/profiler/kineto_client_interface.cpp",
-    "torch/csrc/profiler/itt_observer.cpp",
     "torch/csrc/profiler/orchestration/observer.cpp",
     "torch/csrc/profiler/orchestration/python_tracer.cpp",
+    "torch/csrc/profiler/standalone/execution_graph_observer.cpp",
+    "torch/csrc/profiler/standalone/itt_observer.cpp",
+    "torch/csrc/profiler/standalone/nvtx_observer.cpp",
     "torch/csrc/profiler/stubs/base.cpp",
     "torch/csrc/monitor/counters.cpp",
     "torch/csrc/monitor/events.cpp",


@@ -68,10 +68,6 @@ def _profiler_enabled() -> bool: ...
 def _add_metadata_json(key: str, value: str) -> None: ...
 def _kineto_step() -> None: ...
 def kineto_available() -> bool: ...
-def _add_execution_graph_observer(output_file_path: str) -> bool: ...
-def _remove_execution_graph_observer() -> None: ...
-def _enable_execution_graph_observer() -> None: ...
-def _disable_execution_graph_observer() -> None: ...
 def _record_function_with_args_enter(name: str, args: List[Any]) -> torch.Tensor: ...
 def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
 def _supported_activities() -> Set[ProfilerActivity]: ...


@@ -159,3 +159,8 @@ class _ExtraFields_PyCall:
     caller: _PyFrameState
 class _ExtraFields_Kineto: ...
+def _add_execution_graph_observer(output_file_path: str) -> bool: ...
+def _remove_execution_graph_observer() -> None: ...
+def _enable_execution_graph_observer() -> None: ...
+def _disable_execution_graph_observer() -> None: ...
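The stubs above describe the whole Python-facing surface of the execution graph observer. A rough sketch of how they are typically driven (add, enable, run, disable, remove), assuming the post-move module path `torch._C._profiler` and using a placeholder output path and workload; most callers go through the higher-level `torch.profiler` wrapper instead:

```python
import torch
from torch._C._profiler import (
    _add_execution_graph_observer,
    _disable_execution_graph_observer,
    _enable_execution_graph_observer,
    _remove_execution_graph_observer,
)

# Illustrative sequence: register the observer with an output file, capture
# one region, then tear down.  "eg.json" and the toy model are placeholders.
if _add_execution_graph_observer("eg.json"):
    _enable_execution_graph_observer()
    torch.nn.Linear(8, 8)(torch.randn(4, 8))
    _disable_execution_graph_observer()
    _remove_execution_graph_observer()
```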


@@ -24,7 +24,6 @@
 #include <torch/csrc/autograd/utils/wrap_outputs.h>
 #include <torch/csrc/jit/python/pybind_utils.h>
 #include <torch/csrc/profiler/collection.h>
-#include <torch/csrc/profiler/execution_graph_observer.h>
 #include <torch/csrc/profiler/kineto_shim.h>
 #include <torch/csrc/utils/disable_torch_function.h>
 #include <torch/csrc/utils/pybind.h>
@@ -238,21 +237,6 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused) {
   m.def("_kineto_step", profilerStep); // Only if `USE_KINETO` is set
   m.def("kineto_available", []() { return torch::profiler::kKinetoAvailable; });
-  // PyTorch profiler execution graph internal interface.
-  m.def(
-      "_add_execution_graph_observer",
-      &torch::profiler::impl::addExecutionGraphObserver,
-      py::arg("output_file_name"));
-  m.def(
-      "_remove_execution_graph_observer",
-      &torch::profiler::impl::removeExecutionGraphObserver);
-  m.def(
-      "_enable_execution_graph_observer",
-      &torch::profiler::impl::enableExecutionGraphObserver);
-  m.def(
-      "_disable_execution_graph_observer",
-      &torch::profiler::impl::disableExecutionGraphObserver);
   // NOTICE: These record functions are not torch operators and may not show up
   // in TorchScript tracing, FX transforms, or operator serialization. For these
   // use cases, please use `torch.profiler.record_function`.


@@ -12,10 +12,10 @@
 #include <torch/csrc/profiler/api.h>
 #include <torch/csrc/profiler/collection.h>
 #include <torch/csrc/profiler/containers.h>
-#include <torch/csrc/profiler/itt_observer.h>
 #include <torch/csrc/profiler/kineto_shim.h>
-#include <torch/csrc/profiler/nvtx_observer.h>
 #include <torch/csrc/profiler/orchestration/observer.h>
+#include <torch/csrc/profiler/standalone/itt_observer.h>
+#include <torch/csrc/profiler/standalone/nvtx_observer.h>
 #include <torch/csrc/profiler/util.h>
 #include <ATen/Context.h>


@@ -5,6 +5,7 @@
 #include <torch/csrc/autograd/utils/wrap_outputs.h>
 #include <torch/csrc/jit/python/pybind_utils.h>
 #include <torch/csrc/profiler/collection.h>
+#include <torch/csrc/profiler/standalone/execution_graph_observer.h>
 #include <torch/csrc/utils/pybind.h>
 namespace torch {
@@ -251,6 +252,21 @@ void initPythonBindings(PyObject* module) {
       .def_property_readonly("duration_time_ns", [](const Result& r) {
         return r.endTimeNS() - r.start_time_ns_;
       });
+  // PyTorch profiler execution graph internal interface.
+  m.def(
+      "_add_execution_graph_observer",
+      &torch::profiler::impl::addExecutionGraphObserver,
+      py::arg("output_file_name"));
+  m.def(
+      "_remove_execution_graph_observer",
+      &torch::profiler::impl::removeExecutionGraphObserver);
+  m.def(
+      "_enable_execution_graph_observer",
+      &torch::profiler::impl::enableExecutionGraphObserver);
+  m.def(
+      "_disable_execution_graph_observer",
+      &torch::profiler::impl::disableExecutionGraphObserver);
 }
 } // namespace profiler
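On the Python side, the net effect of moving these registrations is only the module that exports the bindings, as the last hunk below shows. A before/after sketch for any downstream caller of these private functions:

```python
# Before this PR, the execution graph observer bindings were exposed on the
# autograd extension module:
#   from torch._C._autograd import _add_execution_graph_observer
#
# After the move, the same functions are exported by the profiler module:
from torch._C._profiler import _add_execution_graph_observer
```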


@@ -27,7 +27,7 @@
 #include <ATen/core/stack.h>
 #include <ATen/record_function.h>
 #include <c10/util/irange.h>
-#include <torch/csrc/profiler/execution_graph_observer.h>
+#include <torch/csrc/profiler/standalone/execution_graph_observer.h>
 #include <torch/csrc/profiler/util.h>
 using namespace at;


@@ -1,4 +1,4 @@
-#include <torch/csrc/profiler/itt_observer.h>
+#include <torch/csrc/profiler/standalone/itt_observer.h>
 #include <torch/csrc/profiler/stubs/base.h>
 #include <torch/csrc/profiler/util.h>


@@ -1,4 +1,4 @@
-#include <torch/csrc/profiler/nvtx_observer.h>
+#include <torch/csrc/profiler/standalone/nvtx_observer.h>
 #include <torch/csrc/profiler/stubs/base.h>
 #include <torch/csrc/profiler/util.h>


@@ -9,13 +9,13 @@ from warnings import warn
 import torch
 import torch.autograd.profiler as prof
-from torch._C._autograd import (
+from torch._C._profiler import (
     _add_execution_graph_observer,
-    _remove_execution_graph_observer,
-    _enable_execution_graph_observer,
     _disable_execution_graph_observer,
+    _enable_execution_graph_observer,
+    _ExperimentalConfig,
+    _remove_execution_graph_observer,
 )
-from torch._C._profiler import _ExperimentalConfig
 from torch.autograd import ProfilerActivity, kineto_available
 __all__ = ['supported_activities', 'ProfilerAction', 'schedule', 'tensorboard_trace_handler', 'profile',