pytorch/caffe2/operators/async_net_barrier_op.cc
Nikita Shulga 4cb534f92e Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automatic change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os

def get_compiled_files_list():
    import json
    with open("build/compile_commands.json") as f:
        data = json.load(f)
    files = [os.path.relpath(node['file']) for node in data]
    # Map generated build/<path>.DEFAULT.cpp entries back to their source paths.
    for idx, fname in enumerate(files):
        if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
            files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
    return files

def run_clang_tidy(fname):
    # Run clang-tidy on a single file; if it changed anything (i.e. added
    # NOLINT suppressions), commit the result for that file.
    check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname, "-s"])
    changes = check_output(["git", "ls-files", "-m"])
    if len(changes) == 0:
        return
    check_call(["git", "commit", "--all", "-m", f"NOLINT stubs for {fname}"])

def main():
    git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
    compiled_files = get_compiled_files_list()
    for idx, fname in enumerate(git_files):
        if fname not in compiled_files:
            continue
        if fname.startswith("caffe2/contrib/aten/"):
            continue
        print(f"[{idx}/{len(git_files)}] Processing {fname}")
        run_clang_tidy(fname)

if __name__ == "__main__":
    main()
```
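Note that the script assumes it runs from the repository root against a configured build, since it reads `build/compile_commands.json` to limit processing to files that are actually compiled. Committing each file's suppressions separately (`NOLINT stubs for <fname>`) keeps the automated change reviewable file by file.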

Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892

Reviewed By: H-Huang

Differential Revision: D27991944

Pulled By: malfet

fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 14:10:25 -07:00

#include "caffe2/operators/async_net_barrier_op.h"
namespace caffe2 {
namespace {
std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>>
asyncBarrierOpDevInfer(const OperatorDef& def) {
  auto op_device =
      def.has_device_option() ? def.device_option() : DeviceOption();
  ArgumentHelper helper(def);
  auto cross_device = helper.GetSingleArgument<int>("cross_device", 0);
  std::vector<DeviceOption> opt;
  for (int i = 0; i < def.input().size(); ++i) {
    if (cross_device == 1) {
      // With cross_device set, pin input/output i to device id i,
      // keeping the operator's own device type.
      DeviceOption dev;
      dev.set_device_type(op_device.device_type());
      dev.set_device_id(i);
      opt.push_back(dev);
    } else {
      opt.push_back(op_device);
    }
  }
  // Inputs and outputs share the same device assignment.
  return std::make_pair(opt, opt);
}
} // namespace
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
OPERATOR_SCHEMA(AsyncNetBarrier)
    .NumInputs(1, INT_MAX)
    .NumOutputs(1, INT_MAX)
    .IdenticalTypeAndShape()
    .InputsCanCrossDevices()
    .AllowOneToOneInplace()
    .DeviceInferenceFunction(asyncBarrierOpDevInfer)
    .SetDoc(R"DOC(
This is pretty much a no-op operator: its only purpose is to make sure that
async_scheduling schedules certain operations earlier than others.

An example where this operator works well is a mixture of data-parallel and
model-parallel training, where one wants to force all copies to start before
the data-parallel part starts.
)DOC")
    .Arg(
        "cross_device",
        "Specifies whether inputs should be placed across different devices "
        "in device inference options");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
SHOULD_NOT_DO_GRADIENT(AsyncNetBarrier);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
REGISTER_CPU_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp<CPUContext>);
} // namespace caffe2
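
For illustration, here is a minimal sketch of creating this operator through the Caffe2 Python frontend via `core.CreateOperator`; the operator name and the `cross_device` argument come from the schema above, while the blob names and the two-device setup are hypothetical:

```
# Minimal sketch, assuming the standard caffe2.python frontend.
# "AsyncNetBarrier" and "cross_device" come from the schema above;
# the blob names and two-device setup are hypothetical.
from caffe2.python import core

op = core.CreateOperator(
    "AsyncNetBarrier",
    ["copy_0", "copy_1"],  # inputs, e.g. per-device parameter copies
    ["copy_0", "copy_1"],  # outputs may alias inputs (AllowOneToOneInplace)
    cross_device=1,        # device inference pins input i to device id i
)
```

With `cross_device=1`, the device inference function above assigns input/output `i` to device id `i`, matching the mixed data-parallel/model-parallel scenario described in the doc string.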