Summary:
I have some test code in there as well, along with a script `test_libtorch` to run it. You'll need to modify `test_libtorch` to point to where you have `pytorch` built. I currently require that `pybind11` is included as a subdirectory of the test, but I added it to the `.gitignore` to make this reviewable.
Currently, something like this works:
```cpp
#include <iostream>

struct Foo {
  int x, y;
  Foo() : x(2), y(5) {}
  Foo(int x_, int y_) : x(x_), y(y_) {}
  void display() {
    std::cout << "x: " << x << ' ' << "y: " << y << std::endl;
  }
  int64_t add(int64_t z) {
    return (x + y) * z;
  }
};

static auto test = torch::jit::class_<Foo>("Foo")
                       .def(torch::jit::init<int64_t, int64_t>())
                       .def("display", &Foo::display)
                       .def("add", &Foo::add)
                       // combine is the method discussed under "Current issues" below
                       .def("combine", &Foo::combine);
```
with
```py
@torch.jit.script
def f(x):
    val = torch._C.Foo(5, 3)
    val.display()
    print(val.add(3))
```
results in
```
x: 5 y: 3
24
```
Current issues:
- [x] The Python class created by TorchScript doesn't interact properly with the surrounding code.
```py
@torch.jit.script
def f(x):
    val = torch._C.Foo(5, 3)
    return val
```
- [x] Doesn't properly take in non-pointer classes. Can't define this function signature in C++ (I believe we don't want to support this):
```cpp
void combine(Foo x) {
```
- [x] Has some issues with memory for blobs when constructing multiple objects (fix the constant propagation pass so it doesn't treat capsules as the same object; see the node-equality excerpt after this list).
```py
@torch.jit.script
def f(x):
    val = torch._C.Foo(5, 3)
    val2 = torch._C.Foo(100, 0)
    val.display()
    print(val.add(3))
```
- [ ] Can't define multiple constructors (this would need an overload string, which is currently not possible since we don't support overloaded methods).
- [x] `init` uses slightly different syntax than `pybind11`: `.init<...>()` instead of `.def(py::init<>())` (see the pybind11 comparison below).
- [x] I couldn't figure out how to add some files into the build so they'd be copied to the `include/` directories, so I symlinked them manually.
- [ ] Currently, the conversion from Python into Torchscript doesn't work.
- [ ] Torchbind also currently requires a Python/pybind11 dependency. Fixing this would probably involve some kind of macro to bind into Python when possible.
- [ ] Currently we pass back into Python by value; there's no way of passing by reference.
- [x] Currently we can only register one method per type signature. This is because we create a `static auto opRegistry`, and the function is templated on the type signature.
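
Related to the capsule item above: the node-equality helper in the file listing below explicitly refuses to treat nodes with Capsule-typed outputs as equal, so two separately constructed objects cannot be deduplicated by passes that rely on it. A minimal excerpt from `EqualNode::operator()`:

```cpp
// Outputs must match type-for-type, and any Capsule-typed output
// makes the two nodes compare unequal.
for (size_t i = 0; i < lhs_outputs.size(); ++i) {
  if (*lhs_outputs[i]->type() != *rhs_outputs[i]->type())
    return false;
  if (lhs_outputs[i]->type() == CapsuleType::get())
    return false;
}
```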
Somewhat blocked on https://github.com/pytorch/pytorch/pull/21177. We currently use some structures that will be refactored by that PR (namely `return_type_to_ivalue` and `ivalue_to_arg_type`).
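
For the `init` syntax point above, here is how the equivalent registration looks in plain pybind11, for comparison (a sketch only: it assumes the `Foo` struct from the summary is visible in this translation unit, and `example` / `m` are a made-up module name and module handle):

```cpp
#include <pybind11/pybind11.h>

namespace py = pybind11;

// Standard pybind11 binding of the same class, for comparison with
// the torch::jit::class_ registration above.
PYBIND11_MODULE(example, m) {
  py::class_<Foo>(m, "Foo")
      .def(py::init<int, int>())
      .def("display", &Foo::display)
      .def("add", &Foo::add);
}
```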
Pull Request resolved: https://github.com/pytorch/pytorch/pull/21098
Differential Revision: D16634872
Pulled By: Chillee
fbshipit-source-id: 1408bb89ea649c27d560df59e2cf9920467fe1de
135 lines · 3.6 KiB · C++
#include <torch/csrc/jit/ir.h>

#include <algorithm>
#include <unordered_map>

#include <ATen/core/functional.h>
#include <ATen/core/interned_strings.h>
#include <c10/util/Exception.h>
#include <torch/csrc/jit/node_hashing.h>
#include <torch/csrc/jit/passes/common_subexpression_elimination.h>
#include <torch/csrc/utils/hash.h>

namespace torch {
namespace jit {

namespace {

bool tensorEqual(const at::Tensor& lhs, const at::Tensor& rhs) {
  return lhs.type() == rhs.type() && lhs.equal(rhs);
}

bool tensorListEqual(
    const std::vector<at::Tensor>& lhs,
    const std::vector<at::Tensor>& rhs) {
  if (lhs.size() != rhs.size())
    return false;
  return std::equal(lhs.begin(), lhs.end(), rhs.begin(), tensorEqual);
}

// Check whether two nodes have the same attributes in CSE.
// This function may be too conservative for general use.
// Do NOT support g/gs attributes.
bool attributesEqualCSE(const Node* lhs, const Node* rhs) {
  AT_ASSERT(lhs != nullptr);
  AT_ASSERT(rhs != nullptr);
  // One has attributes, the other does not.
  if (lhs->hasAttributes() != rhs->hasAttributes())
    return false;
  // Neither has attributes.
  if (!lhs->hasAttributes() && !rhs->hasAttributes())
    return true;

  auto lnames = lhs->attributeNames();
  auto rnames = rhs->attributeNames();
  std::sort(lnames.begin(), lnames.end());
  std::sort(rnames.begin(), rnames.end());
  if (lnames != rnames)
    return false;

  for (auto name : lnames) {
    if (lhs->kindOf(name) != rhs->kindOf(name))
      return false;

#define COMPARE_ATTRIBUTEVALUE(type)          \
  case AttributeKind::type: {                 \
    if (lhs->type(name) != rhs->type(name))   \
      return false;                           \
  } break;

    switch (lhs->kindOf(name)) {
      COMPARE_ATTRIBUTEVALUE(f)
      COMPARE_ATTRIBUTEVALUE(fs)
      COMPARE_ATTRIBUTEVALUE(i)
      COMPARE_ATTRIBUTEVALUE(is)
      COMPARE_ATTRIBUTEVALUE(s)
      COMPARE_ATTRIBUTEVALUE(ss)
      case AttributeKind::t: {
        if (!tensorEqual(lhs->t(name), rhs->t(name)))
          return false;
        break;
      }
      case AttributeKind::ts: {
        if (!tensorListEqual(lhs->ts(name), rhs->ts(name)))
          return false;
        break;
      }
      case AttributeKind::g:
      case AttributeKind::gs:
        return false;
    }

#undef COMPARE_ATTRIBUTEVALUE
  }

  return true;
}

} // anonymous namespace

// Hash a node on its kind, the type kinds of its outputs, and the
// unique ids of its inputs.
size_t HashNode::operator()(const Node* k) const {
  AT_ASSERT(k != nullptr);
  return get_hash(
      k->kind(),
      fmap(k->outputs(), [](const Value* v) { return v->type()->kind(); }),
      fmap(k->inputs(), [](const Value* v) { return v->unique(); }));
};

// Two nodes are equal if they have the same kind, the same output types,
// the same inputs, and the same attributes. Nodes with Capsule-typed
// outputs are never considered equal.
bool EqualNode::operator()(const Node* lhs, const Node* rhs) const {
  if (lhs == nullptr && rhs == nullptr)
    return true;
  if (lhs == nullptr || rhs == nullptr)
    return false;

  if (lhs->kind() != rhs->kind())
    return false;

  // Check whether the output types are the same.
  auto lhs_outputs = lhs->outputs();
  auto rhs_outputs = rhs->outputs();
  if (lhs_outputs.size() != rhs_outputs.size())
    return false;
  for (size_t i = 0; i < lhs_outputs.size(); ++i) {
    if (*lhs_outputs[i]->type() != *rhs_outputs[i]->type())
      return false;
    // Nodes producing Capsule values (opaque custom-class state) are
    // never treated as equal.
    if (lhs_outputs[i]->type() == CapsuleType::get())
      return false;
  }

  // Check whether the inputs are the same.
  auto lhs_inputs = lhs->inputs();
  auto rhs_inputs = rhs->inputs();
  if (lhs_inputs.size() != rhs_inputs.size())
    return false;
  if (!std::equal(lhs_inputs.begin(), lhs_inputs.end(), rhs_inputs.begin()))
    return false;

  if (!attributesEqualCSE(lhs, rhs))
    return false;

  return true;
};

} // namespace jit
} // namespace torch