pytorch/torch/csrc/jit/codegen/cuda/instrumentation.cpp
jjsjann123 b21a6ff639 [NVFuser] Upstream push 0811 (#83239)
Syncing nvfuser devel branch to upstream master. https://github.com/csarofeen/pytorch/

Code changes include:

- codegen improvements:
  1. double support in expression evaluator
- bug fixes:
  1. dropout fix - rework RNG to support broadcasted dropout (Fixes #82784)
  2. expand fix - Patch expand+reduction, expand+view, rework view analysis and guard
- scheduler:
  1. manual transpose schedule example
  2. WIP transpose scheduler

Commits in this PR from the devel branch:

```
b7435afcd22c917713c2f41a7237bc26e1183f14 Transpose scheduler, step 1 (#1854)
8a45dbf72034684eb8e18b1835b533e90b68f184 Add an example on how to manually schedule transpose (#1889)
83dbf56a9554b2efbd5416461d938fff477b0b27 Patch dropout fix (#1898)
69d3519a532250719b1aa8341b50e067b181b42d Expand+Reduction, Expand+View support, rework View analysis and guards (#1883)
15091c488e96343bdc49e3990acbf238a3b3da51 Rework RNG to correctly support broadcasted dropout (#1888)
aafe2d048aaac596e503596a41303423619f3954 Make ExpressionEvaluator support Double (#1885)
```

RUN_TORCHBENCH: nvfuser

Differential Revision: [D38657074](https://our.internmc.facebook.com/intern/diff/D38657074)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/83239
Approved by: https://github.com/davidberard98
2022-08-25 02:23:22 +00:00


#include <torch/csrc/jit/codegen/cuda/instrumentation.h>
// NOTE: isOptionDisabled / DisableOption (used below) are assumed to be
// declared in utils.h
#include <torch/csrc/jit/codegen/cuda/utils.h>

#include <c10/macros/Export.h>

#include <cstdio>     // fopen, fprintf, setbuf
#include <cstdlib>    // getenv
#include <functional> // std::hash

#ifdef _WIN32
#include <c10/util/win32-headers.h>
#else
#include <pthread.h>
#include <unistd.h>
#endif

namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace inst {

Trace::Trace() {
  const char* trace_filename = getenv("PYTORCH_NVFUSER_TRACE");
  if (trace_filename != nullptr) {
    log_file_ = fopen(trace_filename, "w");
    TORCH_CHECK(log_file_ != nullptr, "Can't open trace file");

    // Disable the file stream buffering, since it may result
    // in torn writes in multi-threaded tracing
    setbuf(log_file_, nullptr);

    // Print the trace prologue
    // (including a dummy TRACE_START event)
    fprintf(log_file_, "{\n\"traceEvents\": [\n");
    start_timestamp_ = Clock::now();
    logEvent('I', "TRACE_START");
  }

  if (isOptionDisabled(DisableOption::Nvtx)) {
    record_nvtx_range_ = false;
  }
}

Trace::~Trace() {
  if (log_file_ != nullptr) {
    // Print trace epilogue
    logEvent('I', "TRACE_END", ' ');
    fprintf(log_file_, "],\n\"displayTimeUnit\": \"ms\"\n}\n");
    fclose(log_file_);
  }
}

void Trace::logEvent(char ph, const char* name, char sep) {
  const std::chrono::duration<double> d = Clock::now() - start_timestamp_;
  const double elapsed = d.count() * 1e6;

#ifdef _WIN32
  const unsigned int pid = GetCurrentProcessId();
  const unsigned int tid = GetCurrentThreadId();
#else
  const unsigned int pid = getpid();
  const unsigned int tid = std::hash<pthread_t>{}(pthread_self());
#endif // _WIN32

  fprintf(
      log_file_,
      "{ \"name\": \"%s\", \"ph\": \"%c\", \"pid\": %u, \"tid\": %u, \"ts\": %.0f }%c\n",
      name,
      ph,
      pid,
      tid,
      elapsed,
      sep);
}

} // namespace inst
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
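
For reference, the prologue, per-event lines, and epilogue written above produce Chrome Trace Event JSON (viewable in chrome://tracing or Perfetto). A minimal sketch of what a trace file might contain when tracing is enabled via `PYTORCH_NVFUSER_TRACE=<path>`; the pid/tid/ts values are illustrative, and the trailing comma after TRACE_START assumes logEvent's separator defaults to ',' in the header (not shown here):

```
{
"traceEvents": [
{ "name": "TRACE_START", "ph": "I", "pid": 4242, "tid": 1, "ts": 0 },
{ "name": "TRACE_END", "ph": "I", "pid": 4242, "tid": 1, "ts": 1875 }
],
"displayTimeUnit": "ms"
}
```

The "ts" field is the elapsed time in microseconds since the Trace singleton was constructed, and "ph": "I" marks an instant event. Any events logged elsewhere through this class would appear between the two markers, presumably as "B"/"E" phase pairs, which is how the Chrome trace format represents the beginning and end of a named scope.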