Summary:
Anywhere we used `#include "foo.h"`, we now say `#include <foo.h>`.
Paths are adjusted to be rooted at aten/src, torch/lib, or
the repository root.
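As a concrete illustration of the re-rooting (the file names here are made up for the example), the path arithmetic involved looks like this:

```
import os.path

# Hypothetical example: suppose aten/src/THC/THCBlas.cu contains
#     #include "THCGeneral.h"
# The bare path is first interpreted relative to the including file's
# own directory...
p = os.path.join("aten/src/THC", "THCGeneral.h")  # -> aten/src/THC/THCGeneral.h
# ...and then re-expressed relative to one of the include roots:
print(os.path.relpath(p, "aten/src"))             # -> THC/THCGeneral.h
# so the include is rewritten as: #include <THC/THCGeneral.h>
```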
I modified CMakeLists.txt by hand to remove TH and THC from
the include paths.
I used the following script to do the canonicalization:
```
import subprocess
import re
import os.path

files = subprocess.check_output(['git', 'ls-files']).decode('utf-8').rstrip().split('\n')
for fn in files:
    # Only touch C/C++/CUDA sources under aten/ and torch/.
    if not any(fn.endswith(suff) for suff in ['.cu', '.cpp', '.in', '.h', '.hpp', '.cuh', '.cc']):
        continue
    if not any(fn.startswith(pref) for pref in ["aten/", "torch/"]):
        continue
    with open(fn, 'r') as f:
        c = f.read()

    def fmt(p):
        return "#include <{}>".format(p)

    def repl(m):
        p = m.group(1)
        # System and vendor headers: just requote with angle brackets.
        if p in ["dlfcn.h", "unistd.h", "nvrtc.h", "cuda.h", "cuda_runtime.h", "cstdint", "cudnn.h", "Python.h", "cusparse.h", "cuda_runtime_api.h", "cuda_fp16.h", "cublas_v2.h", "stdint.h", "curand_kernel.h"]:
            return fmt(p)
        # Paths already rooted at a known project prefix: requote as-is.
        if any(p.startswith(pref) for pref in ["torch/csrc", "c10/", "ATen/", "caffe2/", "TH/", "THC/", "Eigen/", "gtest/", "zdl/", "gloo/", "onnx/", "miopen/"]):
            return fmt(p)
        # Otherwise, try to re-root the path against each include root.
        for root in ["aten/src", "torch/lib", ""]:
            for bad_root in [os.path.dirname(fn), "aten/src/TH", "aten/src/THC", "torch/csrc"]:
                new_p = os.path.relpath(os.path.join(bad_root, p), root)
                if not new_p.startswith("../") and (os.path.exists(os.path.join(root, new_p)) or os.path.exists(os.path.join(root, new_p + ".in"))):
                    return fmt(new_p)
        print("ERROR: ", fn, p)
        return m.group(0)

    new_c = re.sub(r'#include "([^"]+)"', repl, c)
    if new_c != c:
        print(fn)
        with open(fn, 'w') as f:
            f.write(new_c)
```
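For reference, the core mechanism here is `re.sub` with a function as the replacement argument; a stripped-down sketch (path re-rooting omitted, sample input made up) behaves like this:

```
import re

def requote(m):
    # Rewrite a double-quoted include using angle brackets.
    return "#include <{}>".format(m.group(1))

src = '#include "ATen/ATen.h"\n#include <vector>\n'
print(re.sub(r'#include "([^"]+)"', requote, src))
# Output:
#   #include <ATen/ATen.h>
#   #include <vector>
```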
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14849
Reviewed By: dzhulgakov
Differential Revision: D13363445
Pulled By: ezyang
fbshipit-source-id: 52361f878a672785f9306c9e9ab2513128092b68
```
#include <torch/csrc/autograd/functions/utils.h>

#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>

#include <sstream>
#include <vector>

namespace torch { namespace autograd {

// Wraps each output tensor in a Variable. If any input requires grad,
// a grad_fn is built via `ctr` and every defined output is connected to it.
variable_list wrap_outputs(const variable_list& inputs, tensor_list&& outputs,
                           function_constructor ctr) {
  variable_list result;
  result.reserve(outputs.size());
  if (!any_variable_requires_grad(inputs)) {
    for (auto& output : outputs) {
      if (output.defined()) {
        result.push_back(make_variable(output, /*requires_grad=*/false));
      } else {
        result.emplace_back();
      }
    }
  } else {
    auto grad_fn = ctr(collect_next_edges(inputs));
    for (auto& output : outputs) {
      if (output.defined()) {
        auto variable = autograd::make_variable(output, /*requires_grad=*/false);
        autograd::create_gradient_edge(variable, grad_fn);
        result.push_back(std::move(variable));
      } else {
        grad_fn->add_input_metadata(Function::undefined_input());
        result.emplace_back();
      }
    }
  }
  return result;
}

// Checks that `inputs` holds exactly `args` variables and that the first
// `required_args` of them are defined (required_args == -1 means all).
void check_input_variables(const char* name, const variable_list& inputs, int args, int required_args) {
  if (required_args == -1) {
    required_args = args;
  }
  if (inputs.size() != (size_t)args) {
    std::stringstream ss;
    ss << name << ": expected " << args << " arguments (got " << inputs.size();
    ss << ")";
    throw std::runtime_error(ss.str());
  }
  for (int i = 0; i < required_args; ++i) {
    if (!inputs[i].defined()) {
      std::stringstream ss;
      ss << name << ": expected Tensor at argument " << i << " (got None)";
      throw std::runtime_error(ss.str());
    }
  }
}

}} // namespace torch::autograd
```