mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os
def get_compiled_files_list():
import json
with open("build/compile_commands.json") as f:
data = json.load(f)
files = [os.path.relpath(node['file']) for node in data]
for idx, fname in enumerate(files):
if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
return files
def run_clang_tidy(fname):
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname,"-s"])
changes = check_output(["git", "ls-files", "-m"])
if len(changes) == 0:
return
check_call(["git", "commit","--all", "-m", f"NOLINT stubs for {fname}"])
def main():
git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
compiled_files = get_compiled_files_list()
for idx, fname in enumerate(git_files):
if fname not in compiled_files:
continue
if fname.startswith("caffe2/contrib/aten/"):
continue
print(f"[{idx}/{len(git_files)}] Processing {fname}")
run_clang_tidy(fname)
if __name__ == "__main__":
main()
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892
Reviewed By: H-Huang
Differential Revision: D27991944
Pulled By: malfet
fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
89 lines
2.6 KiB
C++
#include <torch/csrc/jit/runtime/interpreter.h>

#include <torch/csrc/python_headers.h>

#include <algorithm>
#include <typeinfo>

#include <pybind11/pybind11.h>

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/autograd/python_engine.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/python/pybind.h>
#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/jit/python/python_ir.h>
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/runtime/graph_executor.h>
#include <torch/csrc/jit/runtime/operator.h>
|
|
|
|
namespace py = pybind11;
|
|
|
|
namespace torch {
|
|
namespace jit {
|
|
|
|
namespace {
|
|
|
|
// Note: const_cast is used twice below to acquire a handle to a pyobject.
|
|
Operation createPythonOperation(const Node* op_) {
|
|
pybind11::gil_scoped_acquire gil;
|
|
const ConcretePythonOp* op = static_cast<const ConcretePythonOp*>(op_);
|
|
const py::function func = py::reinterpret_borrow<const py::function>(
|
|
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
|
|
py::handle(const_cast<ConcretePythonOp*>(op)->pyobj.get()));
|
|
|
|
size_t num_inputs = 0;
|
|
for (auto arg_type : op->cconv) {
|
|
if (arg_type == 'd')
|
|
num_inputs++;
|
|
}
|
|
|
|
AT_ASSERT(op->outputs().size() == 1);
|
|
|
|
return [=](Stack* stack) {
|
|
pybind11::gil_scoped_acquire gil;
|
|
py::tuple py_inputs(op->cconv.size());
|
|
size_t i = 0;
|
|
size_t next_scalar = 0;
|
|
size_t next_tensor = 0;
|
|
for (auto arg_type : op->cconv) {
|
|
if (arg_type == 'c') {
|
|
py_inputs[i] = py::reinterpret_borrow<const py::object>(
|
|
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
|
|
const_cast<ConcretePythonOp*>(op)
|
|
->scalar_args[next_scalar++]
|
|
.get());
|
|
} else if (arg_type == 'd') {
|
|
py_inputs[i] =
|
|
toPyObject(std::move(peek(stack, next_tensor, num_inputs)));
|
|
next_tensor++;
|
|
}
|
|
i++;
|
|
}
|
|
drop(stack, num_inputs);
|
|
try {
|
|
py::object py_output(func(*py_inputs));
|
|
stack->push_back(returnToIValue(op->output()->type(), py_output));
|
|
} catch (py::error_already_set& e) {
|
|
throw std::runtime_error(e.what());
|
|
}
|
|
};
|
|
}
|
|
|
|
// Aliasing for PythonOp cannot be derived from a schema; tell the alias
// analysis pass to treat it as an internal special case.
c10::AliasAnalysisKind aliasAnalysisIsSpecialCase() {
  return c10::AliasAnalysisKind::INTERNAL_SPECIAL_CASE;
}
|
|
|
|
// Register the interpreter implementation for prim::PythonOp: every
// PythonOp node is executed via the closure built by createPythonOperation,
// with aliasing handled as an internal special case.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
RegisterOperators reg({Operator(
    prim::PythonOp,
    createPythonOperation,
    aliasAnalysisIsSpecialCase())});
|
|
|
|
} // namespace
|
|
} // namespace jit
|
|
} // namespace torch
|