pytorch/torch/csrc/utils/python_dispatch.cpp
Edward Yang dd64e738c5 Expunge TensorId from all DispatchKey names. (#36240)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/36240

It's annoying, historical, and unnecessary (enum class is already
namespaced).  I did this codemod with:

```
git grep -l 'CPUTensorId' | xargs sed -i 's/CPUTensorId/CPU/g'
git grep -l 'CUDATensorId' | xargs sed -i 's/CUDATensorId/CUDA/g'
git grep -l 'VariableTensorId' | xargs sed -i 's/VariableTensorId/Autograd/g'
git grep -l 'HIPTensorId' | xargs sed -i 's/HIPTensorId/HIP/g'
git grep -l 'MSNPUTensorId' | xargs sed -i 's/MSNPUTensorId/MSNPU/g'
git grep -l 'XLATensorId' | xargs sed -i 's/XLATensorId/XLA/g'
git grep -l 'PrivateUse1_TensorId' | xargs sed -i 's/PrivateUse1_TensorId/PrivateUse1/g'
git grep -l 'PrivateUse2_TensorId' | xargs sed -i 's/PrivateUse2_TensorId/PrivateUse2/g'
git grep -l 'PrivateUse3_TensorId' | xargs sed -i 's/PrivateUse3_TensorId/PrivateUse3/g'
git grep -l 'AutocastTensorId' | xargs sed -i 's/AutocastTensorId/Autocast/g'
git grep -l '_PreAutogradTensorId' | xargs sed -i 's/_PreAutogradTensorId/_PreAutograd/g'
git grep -l 'TESTING_ONLY_GenericWrapperTensorId' | xargs sed -i 's/TESTING_ONLY_GenericWrapperTensorId/TESTING_ONLY_GenericWrapper/g'
git grep -l 'TESTING_ONLY_GenericModeTensorId' | xargs sed -i 's/TESTING_ONLY_GenericModeTensorId/TESTING_ONLY_GenericMode/g'
```

Then I did a git grep for remaining TensorId occurrences, and manually
killed those (mostly in codegen, and some docs that needed updating).
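
For illustration (a sketch, not an excerpt from the diff), a call site testing a
DispatchKeySet `ks` changes like this:

```
// before
if (ks.has(c10::DispatchKey::CPUTensorId)) { ... }
// after
if (ks.has(c10::DispatchKey::CPU)) { ... }
```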

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Test Plan: Imported from OSS

Differential Revision: D20929255

Pulled By: ezyang

fbshipit-source-id: dc371b6aa6e6ea7c0a5660137c14debde806a09d
2020-04-13 23:33:44 -07:00

#include <torch/csrc/utils/python_dispatch.h>
#include <torch/csrc/jit/frontend/function_schema_parser.h>

#include <ATen/core/op_registration/op_registration.h>
#include <ATen/ATen.h>

#include <pybind11/operators.h>

#include <iostream>

namespace py = pybind11;

namespace torch {
namespace impl {
namespace dispatch {
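
// Map the lowercase key names accepted from Python to c10::DispatchKey
// values.  The empty string maps to Undefined, which callers receive as
// c10::nullopt (i.e. "no dispatch key").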
c10::optional<c10::DispatchKey> parseDispatchKey(const std::string& k) {
  static std::unordered_map<std::string, c10::DispatchKey> key_map = {
    {"cpu", c10::DispatchKey::CPU},
    {"cuda", c10::DispatchKey::CUDA},
    {"xla", c10::DispatchKey::XLA},
    {"autograd", c10::DispatchKey::Autograd},
    {"", c10::DispatchKey::Undefined},
  };
  auto it = key_map.find(k);
  TORCH_CHECK(it != key_map.end(), "could not parse ", k);
  if (it->second == c10::DispatchKey::Undefined) {
    return c10::nullopt;
  } else {
    return c10::make_optional(it->second);
  }
}
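
// Parse the alias analysis kind attached to a schema; the empty string
// selects the FROM_SCHEMA default.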
c10::AliasAnalysisKind parseAliasAnalysisKind(const std::string& k) {
  static std::unordered_map<std::string, c10::AliasAnalysisKind> key_map = {
    {"CONSERVATIVE", c10::AliasAnalysisKind::CONSERVATIVE},
    {"FROM_SCHEMA", c10::AliasAnalysisKind::FROM_SCHEMA},
    {"PURE_FUNCTION", c10::AliasAnalysisKind::PURE_FUNCTION},
    {"", c10::AliasAnalysisKind::FROM_SCHEMA},  // default
  };
  auto it = key_map.find(k);
  TORCH_CHECK(it != key_map.end(), "could not parse ", k);
  return it->second;
}
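
// Wrap a raw function as a c10::CppFunction, binding it to a dispatch key
// when one was given; with no key the function registers as a catch-all.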
template <typename Func>
inline c10::CppFunction dispatch_str(const char* key, Func&& raw_f) {
  auto mb_key = parseDispatchKey(key);
  if (mb_key) {
    return c10::dispatch(*mb_key, std::forward<Func>(raw_f));
  } else {
    return c10::CppFunction(std::forward<Func>(raw_f));
  }
}
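
// Expose a small, test-oriented slice of the dispatcher registration API to
// the given Python module.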
void initDispatchBindings(PyObject* module) {
  auto m = py::handle(module).cast<py::module>();

  py::class_<c10::OperatorHandle>(m, "_DispatchOperatorHandle")
    .def("schema", &c10::OperatorHandle::schema);

  // TODO: figure out how to do chaining
  py::class_<c10::Module>(m, "_DispatchModule")
    .def("def_", [](py::object self, const char* schema, const char* alias) {
      self.cast<c10::Module&>().def(torch::schema(schema, parseAliasAnalysisKind(alias)));
      return self;
    }, "", py::arg("schema"), py::arg("alias") = "")
// Simulated "legacy" def where alias analysis kind is not set.
// Ordinarily this can only be exercised from RegisterOperators() API
// but I am not going to bind that here
.def("def_legacy", [](py::object self, const char* schema) {
self.cast<c10::Module&>().def(torch::jit::parseSchema(schema));
return self;
}, "", py::arg("schema"))
    // We can't conveniently turn Python functions into valid functions
    // in the dispatcher, so instead we provide a set of precanned
    // functions for testing purposes.  You're NOT intended to actually
    // call these functions; they're just here so we can register
    // something.
    //
    // Mangling scheme: args_rets, one character per argument/return.
    // t = Tensor; e.g., impl_tt_t takes two Tensors and returns a Tensor.
.def("def_name_t_t", [](py::object self, const char* name, const char* dispatch, const char* debug) {
self.cast<c10::Module&>().def(
name,
dispatch_str(dispatch, [](const at::Tensor& a) {
return a;
}).debug(debug)
);
return self;
}, "", py::arg("name"),
py::arg("dispatch") = "",
py::arg("debug") = "default_def_name_t_t")
.def("def_schema_t_t", [](py::object self, const char* schema, const char* dispatch, const char* alias, const char* debug) {
self.cast<c10::Module&>().def(
torch::schema(schema, parseAliasAnalysisKind(alias)),
dispatch_str(dispatch, [](const at::Tensor& a) {
return a;
}).debug(debug)
);
return self;
}, "", py::arg("name"),
py::arg("dispatch") = "",
py::arg("alias") = "",
py::arg("debug") = "default_def_schema_t_t")
    // TODO: maybe consider deduplicating the definitions here, it's getting
    // pretty long
    .def("impl_t_t", [](py::object self, const char* name, const char* dispatch, const char* debug) {
      self.cast<c10::Module&>().impl(
        name,
        dispatch_str(dispatch, [](const at::Tensor& a) {
          return a;
        }).debug(debug)
      );
      return self;
    }, "", py::arg("name"),
       py::arg("dispatch") = "",
       py::arg("debug") = "impl_t_t")
.def("impl_tt_t", [](py::object self, const char* name, const char* dispatch, const char* debug) {
self.cast<c10::Module&>().impl(
name,
dispatch_str(dispatch, [](const at::Tensor& a, const at::Tensor& b) {
return a;
}).debug(debug)
);
return self;
}, "", py::arg("name"), py::arg("dispatch") = "", py::arg("debug") = "")
;
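
  // A hypothetical Python session against these bindings (assuming they are
  // installed on torch._C; illustrative only, not a stable API):
  //
  //   m = torch._C._dispatch_import("")
  //   m.def_name_t_t("test::foo", dispatch="cpu")
  //   print(torch._C._dispatch_dump("test::foo"))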
m.def("_dispatch_import", [](std::string name) {
if (name.empty()) {
return std::make_unique<c10::Module>(torch::import());
} else {
return std::make_unique<c10::Module>(c10::_import_DOES_NOT_WORK_WITH_MOBILE_CUSTOM_BUILD(name));
}
});
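
  // Debugging helpers: dump the registration state for a single operator,
  // and run invariant checks over one or all registered operators.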
m.def("_dispatch_dump", [](const char* name) -> std::string {
auto op = c10::Dispatcher::singleton().findOp(torch::jit::parseName(name));
if (!op) {
return "";
} else {
return op->dumpState();
}
});
m.def("_dispatch_check_invariants", [](const char* name) {
auto op = c10::Dispatcher::singleton().findOp(torch::jit::parseName(name));
if (!op) {
} else {
return op->checkInvariants();
}
});
m.def("_dispatch_check_all_invariants", []() {
c10::Dispatcher::singleton().checkInvariants();
});
}
}}} // namespace torch::impl::dispatch