Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/61933

### Issue:
Submodules with the same name are not serialized correctly in bytecode format when using `_save_for_mobile`. Submodules that share a name are not distinguished as different modules, even though they have different `forward`, `setstate`, etc.

### Fix:
The mangler creates unique names, so modules and submodules that share a name can be uniquely identified while saving the module. iseeyuan rightly pointed out the underlying issue: the mangler is not used when saving bytecode, so unique references for the submodules are never created. Please refer to the notebook to repro the issue: N777224

### Diff:
The fix described above is implemented. The mangled names are used in bytecode, so the files in the `code/` directory now hold the right references into `bytecode.pkl`.

Will this be backward compatible? (iseeyuan, please feel free to correct or update this.) Yes. This fix impacts only modules with same-name submodules, which were not serialized correctly before. Existing modules should have correct references, and `_load_for_mobile` must not see any change. To confirm this, the existing test cases need to pass for the diff to be approved and shipped.

ghstack-source-id: 134242696

Test Plan:
```
~/fbsource/fbcode > buck test caffe2/test/cpp/jit:jit -- BackendTest.TestCompositeWithSetStates
Downloaded 0/5 artifacts, 0.00 bytes, 100.0% cache miss (for updated rules)
Building: finished in 19.2 sec (100%) 17619/17619 jobs, 3/17619 updated
Total time: 19.5 sec
More details at https://www.internalfb.com/intern/buck/build/91542d50-25f2-434d-9e1a-b93117f4efe1
Tpx test run coordinator for Facebook. See https://fburl.com/tpx for details.
Running with tpx session id: de9e27cf-4c6c-4980-8bc5-b830b7c9c534
Trace available for this run at /tmp/tpx-20210719-161607.659665/trace.log
Started reporting to test run: https://www.internalfb.com/intern/testinfra/testrun/844425127206388
    ✓ ListingSuccess: caffe2/test/cpp/jit:jit - main (8.140)
    ✓ Pass: caffe2/test/cpp/jit:jit - BackendTest.TestCompositeWithSetStates (0.528)
Summary
  Pass: 1
  ListingSuccess: 1
If you need help understanding your runs, please follow the wiki: https://fburl.com/posting_in_tpx_users
Finished test run: https://www.internalfb.com/intern/testinfra/testrun/844425127206388
```

```
~/fbsource/fbcode > buck test caffe2/test/cpp/jit:jit -- BackendTest.TestConsistencyOfCompositeWithSetStates
Building: finished in 4.7 sec (100%) 6787/6787 jobs, 0/6787 updated
Total time: 5.0 sec
More details at https://www.internalfb.com/intern/buck/build/63d6d871-1dd9-4c72-a63b-ed91900c4dc9
Tpx test run coordinator for Facebook. See https://fburl.com/tpx for details.
Running with tpx session id: 81023cd2-c1a2-498b-81b8-86383d73d23b
Trace available for this run at /tmp/tpx-20210722-160818.436635/trace.log
Started reporting to test run: https://www.internalfb.com/intern/testinfra/testrun/8725724325952153
    ✓ ListingSuccess: caffe2/test/cpp/jit:jit - main (7.867)
    ✓ Pass: caffe2/test/cpp/jit:jit - BackendTest.TestConsistencyOfCompositeWithSetStates (0.607)
Summary
  Pass: 1
  ListingSuccess: 1
If you need help understanding your runs, please follow the wiki: https://fburl.com/posting_in_tpx_users
Finished test run: https://www.internalfb.com/intern/testinfra/testrun/8725724325952153
```

To check the `bytecode.pkl` using the module inspector, please check: N1007089

Reviewed By: iseeyuan

Differential Revision: D29669831

fbshipit-source-id: 504dfcb5f7446be5e1c9bd31f0bd9c986ce1a647
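As a rough sketch (not part of the diff itself; it mirrors the shape of the new tests below), the failure mode comes down to saving a composite whose submodules share a module name:

```cpp
// Illustrative repro sketch only: two submodules whose classes are both named
// "m_add" but define different forward() bodies. Before this fix,
// _save_for_mobile() did not use mangled (unique) type names in bytecode.pkl,
// so the two submodules could not be told apart on load.
Module sub1("m_add");
sub1.define(R"(
  def forward(self, x, y):
      return x + y
)");
Module sub2("m_add");  // same name, different forward
sub2.define(R"(
  def forward(self, x, y):
      return x - y
)");
Module c("C");
c.register_module("Add", sub1);
c.register_module("Sub", sub2);
std::stringstream ss;
c._save_for_mobile(ss);          // mangler now emits a unique name per class
auto mc = _load_for_mobile(ss);  // both submodules round-trip correctly
```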
728 lines
24 KiB
C++
#include <gtest/gtest.h>

#include <algorithm> // std::transform, std::equal

#include <test/cpp/jit/test_utils.h>

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/serialization/import.h>
#include <torch/torch.h>

// Tests go in torch::jit
namespace torch {
namespace jit {

TEST(BackendTest, ToBackend) {
  Module m("m");
  m.define(R"(
    def forward(self, x, h):
        return self.accum(x, h), self.sub_accum(x, h)

    def accum(self, x, h):
        return x + h

    def sub_accum(self, x, h):
        return x - h
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(2.0 * torch::ones({}));
  inputs.emplace_back(1.0 * torch::ones({}));
  auto ref = m.forward(inputs).toTuple()->elements();

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lm = torch::jit::detail::codegen_backend_module(
      "test_backend", m, compile_spec, any_dict_ty);
  // lowered module code:
  /*
    class test_backendLoweredModule(Module):
      __parameters__ = []
      __buffers__ = []
      __processed_module : Any
      __method_compile_spec : Dict[str, Any]
      __backend : __torch__.torch.classes.__backends__.test_backend
      __handles : Dict[str, Any]
      def __create_backend(self: torch.jit.test_backendLoweredModule) -> None:
        _0 = __torch__.torch.classes.__backends__.test_backend.__new__(
            __torch__.torch.classes.__backends__.test_backend)
        _1 = (_0).__init__()
        self.__backend = _0
        return None
      def __getstate__(self: torch.jit.test_backendLoweredModule)
          -> Tuple[Dict[str, Any], Any]:
        _2 = (self.__method_compile_spec, self.__processed_module)
        return _2
      def __setstate__(self: torch.jit.test_backendLoweredModule,
          state: Tuple[Dict[str, Any], Any]) -> None:
        self.__method_compile_spec = (state)[0]
        self.__processed_module = (state)[1]
        _3 = (self).__create_backend()
        _4 = (self.__backend).compile(self.__processed_module,
            self.__method_compile_spec, )
        self.__handles = _4
        return None
      def forward(self: torch.jit.test_backendLoweredModule,
          x: Tensor, h: Tensor) -> Tuple[Tensor, Tensor]:
        _5 = uninitialized(Tensor)
        typed_inputs = annotate(List[Any], [x, h])
        _6 = (self.__backend).execute((self.__handles)["forward"],
            typed_inputs, )
        _7, _8, = _6
        _9 = isinstance(_7, Tensor)
        if _9:
          _10 = unchecked_cast(Tensor, _7)
        else:
          ops.prim.RaiseException("AssertionError: ")
          _10 = _5
        _11 = isinstance(_8, Tensor)
        if _11:
          _12 = unchecked_cast(Tensor, _8)
        else:
          ops.prim.RaiseException("AssertionError: ")
          _12 = _5
        return (_10, _12)
  */
  auto res = lm.forward(inputs).toTuple()->elements();
  AT_ASSERT(res[0].toTensor().equal(ref[0].toTensor()));
  AT_ASSERT(res[1].toTensor().equal(ref[1].toTensor()));
}

TEST(BackendTest, ToBackendNotAvailable) {
  Module m("m");
  m.define(R"(
    def forward(self, x, h):
        return self.accum(x, h), self.sub_accum(x, h)

    def accum(self, x, h):
        return x + h

    def sub_accum(self, x, h):
        return x - h
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(2.0 * torch::ones({}));
  inputs.emplace_back(1.0 * torch::ones({}));
  auto ref = m.forward(inputs).toTuple()->elements();

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // Produce lowered module (backend not available).
  // Exception is not thrown at this point.
  auto lm = torch::jit::detail::codegen_backend_module(
      "test_backend_unavailable", m, compile_spec, any_dict_ty);
  // Validate exception is thrown when trying to execute and
  // the backend is not available.
  ASSERT_THROWS_WITH_MESSAGE(
      lm.forward(inputs).toTuple()->elements(), "Backend is not available.");
}

TEST(BackendTest, TestCompiler) {
  Module m("m");
  m.define(R"(
    def forward(self, x, h):
        return x + h
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(2.0 * torch::ones({}));
  inputs.emplace_back(1.0 * torch::ones({}));
  auto ref = m.forward(inputs);

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lm = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", m, compile_spec, any_dict_ty);
  auto res = lm.forward(inputs);
  AT_ASSERT(res.toTensor().equal(ref.toTensor()));

  std::stringstream ss;
  lm._save_for_mobile(ss);
  auto mlm = _load_for_mobile(ss);
  auto mres = mlm.forward(inputs);
  AT_ASSERT(mres.toTensor().equal(ref.toTensor()));
}

TEST(BackendTest, TestComposite) {
  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());

  Module m_add("m_add");
  m_add.define(R"(
    def forward(self, x, y):
        return x + y
  )");
  auto lm_add = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", m_add, compile_spec, any_dict_ty);

  Module m_sub("m_sub");
  m_sub.define(R"(
    def forward(self, x, y):
        return x - y
  )");
  auto lm_sub = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", m_sub, compile_spec, any_dict_ty);

  Module c("C");
  c.register_module("Add", lm_add);
  c.register_module("Sub", lm_sub);
  c.define(R"(
    def forward(self, x, y):
        return self.Add.forward(x, y) * self.Sub.forward(x, y)
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(3.0 * torch::ones({}));
  inputs.emplace_back(1.0 * torch::ones({}));
  auto res_jit = c.forward(inputs);

  std::stringstream ss;
  c._save_for_mobile(ss);
  auto mc = _load_for_mobile(ss);
  auto res_mobile = mc.forward(inputs);

  AT_ASSERT(res_jit.toTensor().equal(res_mobile.toTensor()));
}

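// The helper below builds the repro for this change: both lowered submodules
// come from modules named "m_add", so without mangled (unique) type names
// their entries would collide in bytecode.pkl when saved with
// _save_for_mobile.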
Module getCompositeModuleWithSameNameSubModules() {
  // Two submodules with the same module name but different forward and other
  // functions should be serialized and loaded correctly.

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());

  Module sub1("m_add");
  sub1.define(R"(
    def forward(self, x, y):
        return x + y
  )");
  auto lowered_sub1 = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", sub1, compile_spec, any_dict_ty);

  Module sub2("m_add");
  sub2.define(R"(
    def forward(self, x, y):
        return x - y
  )");
  auto lowered_sub2 = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", sub2, compile_spec, any_dict_ty);

  Module c("C");
  c.register_module("Add", lowered_sub1);
  c.register_module("Sub", lowered_sub2);
  c.define(R"(
    def forward(self, a, b, s:int):
        c = self.Add.forward(a, b)
        d = self.Sub.forward(a, b)
        y = s * (c * d)
        return y
  )");

  return c;
}

TEST(BackendTest, TestCompositeWithSetStates) {
  Module c = getCompositeModuleWithSameNameSubModules();

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::ones({}));
  inputs.emplace_back(3.0 * torch::ones({}));
  inputs.emplace_back(3);
  auto res_jit = c.forward(inputs);

  std::stringstream ss;
  c._save_for_mobile(ss);
  auto mc = _load_for_mobile(ss);
  auto res_mobile = mc.forward(inputs);
  AT_ASSERT(res_jit.toTensor().equal(res_mobile.toTensor()));
}

TEST(BackendTest, TestConsistencyOfCompositeWithSetStates) {
  Module c = getCompositeModuleWithSameNameSubModules();

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::ones({}));
  inputs.emplace_back(3.0 * torch::ones({}));
  inputs.emplace_back(3);

  std::stringstream ss, ss_resave;
  c._save_for_mobile(ss);
  auto mc = _load_for_mobile(ss);
  auto res_mobile = mc.forward(inputs);

  // Check that method names stay stable by reloading the script module and
  // saving it back for mobile. The checks below ensure that the method names
  // and the numerical outputs of the mobile module and the re-saved mobile
  // module are the same.
  auto script_module_load = torch::jit::load(ss);
  script_module_load._save_for_mobile(ss_resave);
  auto mc_reload = _load_for_mobile(ss_resave);
  auto res_mobile_reload = mc_reload.forward(inputs);

  AT_ASSERT(res_mobile_reload.toTensor().equal(res_mobile.toTensor()));

  auto mc_methods = mc.get_methods();
  auto mc_reload_methods = mc_reload.get_methods();

  std::vector<std::string> mc_method_qns, mc_reload_method_qns;

  auto get_qual_name = [](mobile::Method method) -> std::string {
    return method.function().qualname().qualifiedName();
  };

  std::transform(
      mc_methods.begin(),
      mc_methods.end(),
      std::back_inserter(mc_method_qns),
      get_qual_name);

  std::transform(
      mc_reload_methods.begin(),
      mc_reload_methods.end(),
      std::back_inserter(mc_reload_method_qns),
      get_qual_name);

  AT_ASSERT(std::equal(
      mc_method_qns.begin(),
      mc_method_qns.end(),
      mc_reload_method_qns.begin()));
}

TEST(BackendTest, TestCompilerNotSupport) {
  Module m("m");
  m.define(R"(
    def forward(self, x, h):
        return x * h
  )");

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  ASSERT_THROWS_WITH_MESSAGE(
      torch::jit::detail::codegen_backend_module(
          "backend_with_compiler_demo", m, compile_spec, any_dict_ty),
      "The node of aten::mul is not supported in this compiler. Source code:");
}

TEST(BackendTestDebugInfo, TestCompiler) {
  Module m("m");
  m.define(R"(
    def forward(self, x, h):
        return x + h
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::rand({2, 4}));
  inputs.emplace_back(torch::rand({13, 9}));

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lm = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", m, compile_spec, any_dict_ty);

  std::stringstream ss;
  lm._save_for_mobile(ss, ExtraFilesMap(), true);
  auto mlm = _load_for_mobile(ss);
  std::string error_pattern = R"(
  Module hierarchy:top(m).aten::add
Traceback of TorchScript (most recent call last):
  File "<string>", line 5, in FunctionName_UNKNOWN
    typed_inputs: List[Any] = [x, h, ]
    if self.__backend.is_available() :
      _0, = self.__backend.execute(self.__handles["forward"], typed_inputs)
      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      assert isinstance(_0, Tensor)
      return _0
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, h):
        return x + h
               ~~~~~ <--- HERE
  )");
  ASSERT_THROWS_WITH_MESSAGE(mlm.forward(inputs), error_pattern);
}

TEST(BackendTestDebugInfo, TestExceptionStackForCompilerWithModuleHierarchy) {
  Module a("A");
  a.define(R"(
    def forward(self, x, y):
        return x + y
  )");
  Module b("B");
  b.define(R"(
    def forward(self, x):
        return x + 2
  )");
  Module c("C");
  c.register_module("A0", a);
  c.register_module("B0", b);
  c.define(R"(
    def forward(self, x, y):
        return self.A0.forward(x, y) + self.B0.forward(x)
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::rand({2, 4}));
  inputs.emplace_back(torch::rand({13, 9}));

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lm = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", c, compile_spec, any_dict_ty);

  std::stringstream ss;
  lm._save_for_mobile(ss, ExtraFilesMap(), true);
  auto mlm = _load_for_mobile(ss);
  std::string error_pattern = R"(
  Module hierarchy:top(C).A0(A).aten::add
Traceback of TorchScript (most recent call last):
  File "<string>", line 5, in FunctionName_UNKNOWN
    typed_inputs: List[Any] = [x, y, ]
    if self.__backend.is_available() :
      _0, = self.__backend.execute(self.__handles["forward"], typed_inputs)
      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      assert isinstance(_0, Tensor)
      return _0
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, y):
        return self.A0.forward(x, y) + self.B0.forward(x)
               ~~~~~~~~~~~~~~~ <--- HERE
  File "<string>", line 3, in forward

    def forward(self, x, y):
        return x + y
               ~~~~~ <--- HERE
  )");
  ASSERT_THROWS_WITH_MESSAGE(mlm.forward(inputs), error_pattern);
}

TEST(
    BackendTestDebugInfo,
    TestExceptionStackForCompilerWithTwoLevelModuleHierarchy) {
  Module a("A");
  a.define(R"(
    def forward(self, x, y):
        return x + y
  )");
  Module b("B");
  b.register_module("A0", a);
  b.define(R"(
    def forward(self, x, y):
        return self.A0.forward(x, y) + 2
  )");
  Module c("C");
  c.register_module("B0", b);
  c.define(R"(
    def forward(self, x, y):
        return self.B0.forward(x, y) + 3
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::rand({2, 4}));
  inputs.emplace_back(torch::rand({13, 9}));

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lm = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", c, compile_spec, any_dict_ty);

  std::stringstream ss;
  lm._save_for_mobile(ss, ExtraFilesMap(), true);
  auto mlm = _load_for_mobile(ss);
  /*
   * The raised error stack will look like this:
   * Module hierarchy:top(backend_with_compiler_demoLoweredModule).B0(B).A0(A)
   * Traceback of TorchScript (most recent call last):
   *  File "<string>", line 5, in FunctionName_UNKNOWN
   *    typed_inputs: List[Any] = [x, y, ]
   *    if self.__backend.is_available() :
   *      _0, = self.__backend.execute(self.__handles["forward"],
   *          typed_inputs)
   *      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
   *      assert isinstance(_0, Tensor)
   *      return _0
   *  File "<string>", line 3, in FunctionName_UNKNOWN
   *
   *    def forward(self, x, y):
   *        return self.B0.forward(x, y) + 3
   *               ~~~~~~~~~~~~~~~ <--- HERE
   *
   *  File "<string>", line 3, in FunctionName_UNKNOWN
   *
   *    def forward(self, x, y):
   *        return self.A0.forward(x, y) + 2
   *               ~~~~~~~~~~~~~~~ <--- HERE
   *
   *  File "<string>", line 3, in FunctionName_UNKNOWN
   *
   *    def forward(self, x, y):
   *        return x + y
   *               ~~~~~ <--- HERE
   */
  std::string error_pattern = R"(
  Module hierarchy:top(C).B0(B).A0(A).aten::add
Traceback of TorchScript (most recent call last):
  File "<string>", line 5, in FunctionName_UNKNOWN
    typed_inputs: List[Any] = [x, y, ]
    if self.__backend.is_available() :
      _0, = self.__backend.execute(self.__handles["forward"], typed_inputs)
      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      assert isinstance(_0, Tensor)
      return _0
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, y):
        return self.B0.forward(x, y) + 3
               ~~~~~~~~~~~~~~~ <--- HERE
  File "<string>", line 3, in forward

    def forward(self, x, y):
        return self.A0.forward(x, y) + 2
               ~~~~~~~~~~~~~~~ <--- HERE
  File "<string>", line 3, in forward

    def forward(self, x, y):
        return x + y
               ~~~~~ <--- HERE
  )");
  ASSERT_THROWS_WITH_MESSAGE(mlm.forward(inputs), error_pattern);
}

TEST(BackendTestDebugInfo, TestExceptionStackForCompilerWithLoweredSubModule) {
  std::shared_ptr<CompilationUnit> cu = std::make_shared<CompilationUnit>();
  Module a("A");
  a.define(R"(
    def forward(self, x, y):
        return x + y
  )");
  Module b("B");
  b.define(R"(
    def forward(self, x):
        return x + 2
  )");
  Module c("C");
  c.register_module("A0", a);
  c.register_module("B0", b);
  c.define(R"(
    def forward(self, x, y):
        return self.A0.forward(x, y) + self.B0.forward(x)
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::rand({2, 4}));
  inputs.emplace_back(torch::rand({13, 9}));

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  IValue submodule = c.attr("A0");
  Module current_sm = submodule.toModule();
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lowered_submodule = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", current_sm, compile_spec, any_dict_ty);

  c.type()->unsafeChangeAttributeType("A0", lowered_submodule.type());
  c.setattr("A0", lowered_submodule._ivalue());
  std::unordered_map<TypePtr, TypePtr> type_remap;
  type_remap[a.type()] = lowered_submodule.type();
  auto type_remap_fn = [&type_remap](TypePtr in) {
    auto it = type_remap.find(in);
    if (it == type_remap.end())
      return in;
    return it->second;
  };
  for (auto& fn : c.type()->methods()) {
    auto method = c.get_method(fn->name());
    auto graph = method.graph();
    graph->remapTypes(type_remap_fn);
    auto new_schema = fn->getSchema().cloneWithRemappedTypes(type_remap_fn);
    fn->setSchema(new_schema);
  }

  std::stringstream ss;
  c._save_for_mobile(ss, ExtraFilesMap(), true);
  auto c_loaded = _load_for_mobile(ss);
  std::string error_pattern = R"(
  Module hierarchy:top(C).A0(A).aten::add
Traceback of TorchScript (most recent call last):
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, y):
        return self.A0.forward(x, y) + self.B0.forward(x)
               ~~~~~~~~~~~~~~~ <--- HERE
  File "<string>", line 5, in forward
    typed_inputs: List[Any] = [x, y, ]
    if self.__backend.is_available() :
      _0, = self.__backend.execute(self.__handles["forward"], typed_inputs)
      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      assert isinstance(_0, Tensor)
      return _0
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, y):
        return x + y
               ~~~~~ <--- HERE
  )");
  ASSERT_THROWS_WITH_MESSAGE(c_loaded.forward(inputs), error_pattern);
}

TEST(
    BackendTestDebugInfo,
    TestExceptionStackForCompilerWithSelectiveLoweredSubModule) {
  std::shared_ptr<CompilationUnit> cu = std::make_shared<CompilationUnit>();
  Module aa("AA");
  aa.define(R"(
    def forward(self, x, y):
        return x + y
  )");
  Module a("A");
  a.register_module("AA0", aa);
  a.define(R"(
    def forward(self, x, y):
        return self.AA0.forward(x, y) + 3
  )");
  Module b("B");
  b.define(R"(
    def forward(self, x):
        return x + 2
  )");
  Module c("C");
  c.register_module("A0", a);
  c.register_module("B0", b);
  c.define(R"(
    def forward(self, x, y):
        return self.A0.forward(x, y) + self.B0.forward(x)
  )");

  std::vector<IValue> inputs;
  inputs.emplace_back(torch::rand({2, 4}));
  inputs.emplace_back(torch::rand({13, 9}));

  c10::Dict<IValue, IValue> compile_spec(StringType::get(), AnyType::get());
  c10::Dict<IValue, IValue> fake_dict(StringType::get(), AnyType::get());
  fake_dict.insert("", "");
  compile_spec.insert("forward", fake_dict);
  IValue submodule = c.attr("A0");
  Module current_sm = submodule.toModule();
  auto any_dict_ty = DictType::create(StringType::get(), AnyType::get());
  // lowered module
  auto lowered_submodule = torch::jit::detail::codegen_backend_module(
      "backend_with_compiler_demo", current_sm, compile_spec, any_dict_ty);

  c.type()->unsafeChangeAttributeType("A0", lowered_submodule.type());
  c.setattr("A0", lowered_submodule._ivalue());
  std::unordered_map<TypePtr, TypePtr> type_remap;
  type_remap[a.type()] = lowered_submodule.type();
  auto type_remap_fn = [&type_remap](TypePtr in) {
    auto it = type_remap.find(in);
    if (it == type_remap.end())
      return in;
    return it->second;
  };
  for (auto& fn : c.type()->methods()) {
    auto method = c.get_method(fn->name());
    auto graph = method.graph();
    graph->remapTypes(type_remap_fn);
    auto new_schema = fn->getSchema().cloneWithRemappedTypes(type_remap_fn);
    fn->setSchema(new_schema);
  }

  std::stringstream ss;
  c._save_for_mobile(ss, ExtraFilesMap(), true);
  auto c_loaded = _load_for_mobile(ss);
  /*
   * The error stack trace will look like this:
   * Module hierarchy:top(C).A0(backend_with_compiler_demoLoweredModule).AA0(AA)
   * Traceback of TorchScript (most recent call last):
   *  File "<string>", line 3, in FunctionName_UNKNOWN
   *
   *    def forward(self, x, y):
   *        return self.A0.forward(x, y) + self.B0.forward(x)
   *               ~~~~~~~~~~~~~~~ <--- HERE
   *
   *  File "<string>", line 5, in FunctionName_UNKNOWN
   *    typed_inputs: List[Any] = [x, y, ]
   *    if self.__backend.is_available() :
   *      _0, = self.__backend.execute(self.__handles["forward"],
   *          typed_inputs)
   *      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
   *      assert isinstance(_0, Tensor)
   *      return _0
   *  File "<string>", line 3, in FunctionName_UNKNOWN
   *
   *    def forward(self, x, y):
   *        return self.AA0.forward(x, y) + 3
   *               ~~~~~~~~~~~~~~~~ <--- HERE
   *
   *  File "<string>", line 3, in FunctionName_UNKNOWN
   *
   *    def forward(self, x, y):
   *        return x + y
   *               ~~~~~ <--- HERE
   */
  std::string error_pattern = R"(
  Module hierarchy:top(C).A0(A).AA0(AA).aten::add
Traceback of TorchScript (most recent call last):
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, y):
        return self.A0.forward(x, y) + self.B0.forward(x)
               ~~~~~~~~~~~~~~~ <--- HERE
  File "<string>", line 5, in forward
    typed_inputs: List[Any] = [x, y, ]
    if self.__backend.is_available() :
      _0, = self.__backend.execute(self.__handles["forward"], typed_inputs)
      ~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      assert isinstance(_0, Tensor)
      return _0
  File "<string>", line 3, in FunctionName_UNKNOWN

    def forward(self, x, y):
        return self.AA0.forward(x, y) + 3
               ~~~~~~~~~~~~~~~~ <--- HERE
  File "<string>", line 3, in forward

    def forward(self, x, y):
        return x + y
               ~~~~~ <--- HERE
  )");
  ASSERT_THROWS_WITH_MESSAGE(c_loaded.forward(inputs), error_pattern);
}

} // namespace jit
} // namespace torch