Revert "[Profiler] Move legacy profiler out of torch/csrc/autograd (#85512)"

This reverts commit 157a3d2a7c.

Reverted https://github.com/pytorch/pytorch/pull/85512 on behalf of https://github.com/DanilBaibak because the deleted files caused the internal build to fail. Please re-submit via codev.
Author: PyTorch MergeBot
Date:   2022-10-14 14:56:59 +00:00
Parent: 4460e40db4
Commit: 8eb579e362
10 changed files with 32 additions and 35 deletions


@@ -131,6 +131,7 @@ libtorch_sources_common = sorted(core_sources_common + torch_unpickler_common)
# The profilers are not needed in the lite interpreter build.
libtorch_profiler_sources = [
"torch/csrc/autograd/profiler_legacy.cpp",
"torch/csrc/autograd/profiler_kineto.cpp",
"torch/csrc/profiler/collection.cpp",
"torch/csrc/profiler/kineto_shim.cpp",
@@ -140,7 +141,6 @@ libtorch_profiler_sources = [
"torch/csrc/profiler/standalone/execution_graph_observer.cpp",
"torch/csrc/profiler/standalone/itt_observer.cpp",
"torch/csrc/profiler/standalone/nvtx_observer.cpp",
"torch/csrc/profiler/standalone/profiler_legacy.cpp",
"torch/csrc/profiler/stubs/base.cpp",
"torch/csrc/monitor/counters.cpp",
"torch/csrc/monitor/events.cpp",


@@ -1115,7 +1115,6 @@ def main():
'include/torch/csrc/onnx/*.h',
'include/torch/csrc/profiler/*.h',
'include/torch/csrc/profiler/orchestration/*.h',
'include/torch/csrc/profiler/standalone/*.h',
'include/torch/csrc/profiler/stubs/*.h',
'include/torch/csrc/utils/*.h',
'include/torch/csrc/tensor/*.h',


@@ -1,20 +1,4 @@
#pragma once
#include <torch/csrc/autograd/profiler_kineto.h>
#include <torch/csrc/profiler/orchestration/observer.h>
#include <torch/csrc/profiler/standalone/profiler_legacy.h>
// There are some components which use these symbols. Until we migrate them
// we have to mirror them in the old autograd namespace.
namespace torch {
namespace autograd {
namespace profiler {
using namespace ::torch::profiler_legacy;
using ::torch::profiler::impl::ActivityType;
using ::torch::profiler::impl::getProfilerConfig;
using ::torch::profiler::impl::ProfilerConfig;
using ::torch::profiler::impl::profilerEnabled;
using ::torch::profiler::impl::ProfilerState;
} // namespace profiler
} // namespace autograd
} // namespace torch
#include <torch/csrc/autograd/profiler_legacy.h>


@@ -58,12 +58,9 @@ inline int64_t getTimeUs() {
}
using torch::profiler::impl::ActiveProfilerType;
using torch::profiler::impl::ActivityType;
using torch::profiler::impl::dtypesToStr;
using torch::profiler::impl::EventType;
using torch::profiler::impl::ExtraFields;
using torch::profiler::impl::ProfilerConfig;
using torch::profiler::impl::ProfilerState;
using torch::profiler::impl::ProfilerStateBase;
using torch::profiler::impl::PyExtraFieldsBase;
using torch::profiler::impl::Result;


@@ -1,4 +1,4 @@
#include <torch/csrc/profiler/standalone/profiler_legacy.h>
#include <torch/csrc/autograd/profiler_legacy.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/jit/frontend/tracer.h>
@@ -24,7 +24,8 @@
#include <iostream>
namespace torch {
namespace profiler_legacy {
namespace autograd {
namespace profiler {
// We decompose the profiler logic into the following components:
//
@@ -678,5 +679,6 @@ void RecordProfile::processEvents(const std::vector<LegacyEvent*>& events) {
writeProfilerEventsToStream(out_, events);
}
} // namespace profiler_legacy
} // namespace profiler
} // namespace autograd
} // namespace torch


@@ -16,11 +16,11 @@
#include <torch/csrc/profiler/util.h>
namespace torch {
// namespace autograd {
// struct Node;
// } // namespace autograd
namespace autograd {
namespace profiler_legacy {
struct Node;
namespace profiler {
enum class C10_API_ENUM EventKind : uint16_t {
Mark,
@@ -412,5 +412,6 @@ struct TORCH_API TLSLegacyProfilerGuard {
const c10::optional<ProfilerDisableOptions> profilerDisableOptions_;
};
} // namespace profiler_legacy
} // namespace profiler
} // namespace autograd
} // namespace torch
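After the revert, code that consumes the legacy profiler spells the namespace torch::autograd::profiler again instead of torch::profiler_legacy. Below is a minimal caller-side sketch, using only the ProfilerConfig, ProfilerState, and TLSLegacyProfilerGuard names visible in this hunk; the single-argument constructions rely on default arguments and are an assumption, not something shown in the diff.

    // Sketch only: RAII use of the legacy profiler under the restored namespace.
    #include <torch/csrc/autograd/profiler_legacy.h>

    void profile_region() {
      using namespace torch::autograd::profiler;
      ProfilerConfig cfg{ProfilerState::CPU};  // assumed: remaining ctor args default
      TLSLegacyProfilerGuard guard{cfg};       // enables profiling; disables on scope exit
      // ... run the code to be profiled ...
    }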


@@ -1,5 +1,5 @@
#pragma once
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/autograd/profiler_kineto.h>
#include <torch/csrc/jit/mobile/module.h>
namespace torch {


@@ -1,3 +1,17 @@
#pragma once
#include <torch/csrc/profiler/orchestration/observer.h>
// There are some components which use these symbols. Until we migrate them
// we have to mirror them in the old autograd namespace.
namespace torch {
namespace autograd {
namespace profiler {
using torch::profiler::impl::ActivityType;
using torch::profiler::impl::getProfilerConfig;
using torch::profiler::impl::ProfilerConfig;
using torch::profiler::impl::profilerEnabled;
using torch::profiler::impl::ProfilerState;
} // namespace profiler
} // namespace autograd
} // namespace torch
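This restored block is what keeps downstream code that still uses the old autograd spelling compiling: each using-declaration forwards to the real symbol in torch::profiler::impl. A short sketch of such a consumer follows; the include path is a guess (whichever header carries this hunk), and only names re-exported above are used.

    // Sketch only: querying profiler state through the mirrored aliases.
    #include <torch/csrc/profiler/api.h>  // assumed location of the aliases above

    bool profiler_is_active() {
      if (!torch::autograd::profiler::profilerEnabled()) {
        return false;
      }
      const auto config = torch::autograd::profiler::getProfilerConfig();
      // 'state' is assumed to be the ProfilerState member of ProfilerConfig.
      return config.state != torch::autograd::profiler::ProfilerState::Disabled;
    }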


@@ -153,7 +153,7 @@ class ExperimentalConfigWrapper {
// do not trace CPU or GPU events.
bool cupti_range_profiler = config_.profiler_metrics.size() > 0;
if (cupti_range_profiler &&
activities.count(torch::profiler::impl::ActivityType::CPU)) {
activities.count(torch::autograd::profiler::ActivityType::CPU)) {
LOG(WARNING)
<< "Cannot run range profiler with CPU activities, please only"
<< " use CUDA activity type";
@@ -211,10 +211,10 @@ void prepareTrace(
}
std::set<libkineto::ActivityType> k_activities;
if (activities.count(torch::profiler::impl::ActivityType::CPU)) {
if (activities.count(torch::autograd::profiler::ActivityType::CPU)) {
k_activities.insert(cpuTypes.begin(), cpuTypes.end());
}
if (activities.count(torch::profiler::impl::ActivityType::CUDA)) {
if (activities.count(torch::autograd::profiler::ActivityType::CUDA)) {
k_activities.insert(cudaTypes.begin(), cudaTypes.end());
}
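These kineto_shim hunks only change how ActivityType is spelled (back to the torch::autograd::profiler alias); the filtering logic in prepareTrace() is untouched. For context, a caller-side sketch of the activity set that prepareTrace() inspects, not taken from the diff:

    // Sketch only: building the activity set that prepareTrace() filters on.
    #include <set>
    #include <torch/csrc/autograd/profiler.h>  // assumed to pull in ActivityType

    std::set<torch::autograd::profiler::ActivityType> activities{
        torch::autograd::profiler::ActivityType::CPU,
        torch::autograd::profiler::ActivityType::CUDA,
    };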


@@ -113,7 +113,7 @@ struct ActivityTraceWrapper {
#endif
};
using ActivitySet = std::set<torch::profiler::impl::ActivityType>;
using ActivitySet = std::set<torch::autograd::profiler::ActivityType>;
void prepareTrace(
const bool cpuOnly,
const ActivitySet& activities,