#include <gtest/gtest.h>

#include <sstream>

#include <torch/csrc/jit/tensorexpr/cpp_codegen.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <torch/csrc/jit/testing/file_check.h>

namespace torch {
namespace jit {

using namespace torch::jit::tensorexpr;

TEST(CppPrinter, AllocateOnStackThenFree) {
  // A 2x3 int buffer is small enough for the printer to place on the stack,
  // so no explicit free is expected in the generated code.
  std::vector<ExprPtr> dims = {alloc<IntImm>(2), alloc<IntImm>(3)};
  BufPtr buf = alloc<Buf>("x", dims, kInt);
  AllocatePtr alloc_ = alloc<Allocate>(buf);
  FreePtr free_ = alloc<Free>(buf);
  BlockPtr block = Block::make({alloc_, free_});

  std::stringstream ss;
  CppPrinter printer(&ss);
  printer.visit(block);
  const std::string expected = R"(
 # CHECK: {
 # CHECK:   int x[6];
 # CHECK: }
  )";
  torch::jit::testing::FileCheck().run(expected, ss.str());
}

TEST(CppPrinter, AllocateOnHeapThenFree) {
  // A 20x50x3 int64 buffer is too large for the stack, so the printer
  // emits a malloc/free pair.
  std::vector<ExprPtr> dims = {
      alloc<IntImm>(20), alloc<IntImm>(50), alloc<IntImm>(3)};
  BufPtr buf = alloc<Buf>("y", dims, kLong);
  AllocatePtr alloc_ = alloc<Allocate>(buf);
  FreePtr free_ = alloc<Free>(buf);
  BlockPtr block = Block::make({alloc_, free_});

  std::stringstream ss;
  CppPrinter printer(&ss);
  printer.visit(block);
  // sizeof(long) = 8;
  // dim0 * dim1 * dim2 * sizeof(long) = 20 * 50 * 3 * 8 = 24000.
  const std::string expected = R"(
 # CHECK: {
 # CHECK:   int64_t* y = static_cast<int64_t*>(malloc(24000));
 # CHECK:   free(y);
 # CHECK: }
  )";
  torch::jit::testing::FileCheck().run(expected, ss.str());
}

} // namespace jit
} // namespace torch