Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/51797

The C++ API ```codegen_backend_module``` is added to ```to_<backend>```. Python-related code is decoupled from this function, so it can be used from both C++ and Python.

* Tests

Python: the existing ```test_backends.py```, which calls the C++ API under the hood.

C++: an end-to-end test, ```jit.BackendTest.ToBackend```, is added in ```test_backend.cpp```. The original class definitions in this file are moved to ```test_backend_lib.cpp```.

ghstack-source-id: 121687464

(Note: this ignores all push blocking failures!)

Test Plan: CI

Reviewed By: raziel

Differential Revision: D26280518

fbshipit-source-id: fd466e4b448847ce64010a3297fff0b5760c5280
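For illustration, the new entry point can be driven entirely from C++, with no Python involved. The sketch below is not part of this commit: the helper ```lower_to_backend``` and the backend name ```"my_backend"``` are placeholders (the test in this file registers and lowers to ```test_backend```), and the argument order mirrors the call in ```test_backend.cpp```.

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/torch.h>

// Hypothetical helper: lower an already-scripted module to a registered backend.
// "my_backend" is a placeholder; a backend with that name must have been
// registered beforehand (the test in this file uses "test_backend").
torch::jit::Module lower_to_backend(const torch::jit::Module& m) {
  // Per-method compile specs: method name -> backend-specific options.
  c10::Dict<c10::IValue, c10::IValue> compile_spec(
      c10::StringType::get(), c10::AnyType::get());
  c10::Dict<c10::IValue, c10::IValue> forward_spec(
      c10::StringType::get(), c10::AnyType::get());
  forward_spec.insert("", ""); // dummy entry, as in the test below
  compile_spec.insert("forward", forward_spec);
  auto any_dict_ty =
      c10::DictType::create(c10::StringType::get(), c10::AnyType::get());
  // Pure C++ path: no pybind/Python objects are needed.
  return torch::jit::detail::codegen_backend_module(
      "my_backend", m, compile_spec, any_dict_ty);
}

The compile spec is keyed by method name; what each per-method dict must contain is up to the backend's preprocess and compile steps (the test backend accepts a dummy entry).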
41 lines | 1.2 KiB | C++
#include <gtest/gtest.h>

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/torch.h>

// Tests go in torch::jit
namespace torch {
namespace jit {
TEST(BackendTest, ToBackend) {
  Module m("m");
  m.define(R"(
    def forward(self, x, h):
        return self.accum(x, h), self.sub_accum(x, h)

    def accum(self, x, h):
        return x + h

    def sub_accum(self, x, h):
        return x - h
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(2.0 * torch::ones({}));
  inputs.emplace_back(1.0 * torch::ones({}));
  auto ref = m.forward(inputs).toTuple()->elements();

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lm = torch::jit::detail::codegen_backend_module(
      "test_backend", m, compile_spec, any_dict_ty);
  auto res = lm.forward(inputs).toTuple()->elements();
  AT_ASSERT(res[0].toTensor().equal(ref[0].toTensor()));
  AT_ASSERT(res[1].toTensor().equal(ref[1].toTensor()));
}
} // namespace jit
} // namespace torch