pytorch/test/cpp/lazy/test_ir.cpp
Will Constable 69389fb542 Sync lazy_tensor_staging back to master (#72875)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/72875

This diff contains changes from several PRs landed on the lazy_tensor_staging branch:
* generates 'fallback' overrides for each codegenned op, which is useful for debugging
* supports operators that are missing aten:: symbols for their op names by using the string counterpart instead (see the sketch after this list)
* makes the IR class a base class instead of hardcoding the assumption of the TS (TorchScript) backend
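
As a rough illustration of the string-based op-name point, here is a minimal sketch (assuming the OpKind::Get helper declared in torch/csrc/lazy/core/ir.h, which resolves a qualified string via c10::Symbol::fromQualString; the op name "myns::my_op" is made up for illustration):

    #include <torch/csrc/lazy/core/ir.h>

    void OpKindSketch() {
      // Ops with a first-class aten:: symbol construct an OpKind directly...
      torch::lazy::OpKind symbol_kind{at::aten::view};
      // ...while ops lacking such a symbol can be looked up by their
      // qualified string ("myns::my_op" is hypothetical).
      torch::lazy::OpKind string_kind = torch::lazy::OpKind::Get("myns::my_op");
      (void)symbol_kind;
      (void)string_kind;
    }

Either way the result wraps a c10::Symbol, so downstream IR code does not care how the name was obtained.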

It also resolves lint issues and in particular cleans up the following:
* {Type}s shouldn't be passed into isValueType, and using the catch-all CType base class is nicer than specifying a list of types.

Fixes #72852

Test Plan: tested manually on the lazy_tensor_staging branch

Reviewed By: shunting314

Differential Revision: D34250357

fbshipit-source-id: aa7d589f605055d5d02bc77c77fa6f1182ff7497
(cherry picked from commit 2f8f5e4971)
2022-02-18 03:49:46 +00:00

#include <gtest/gtest.h>
#include <c10/util/Exception.h>
#include <torch/csrc/lazy/core/config.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>
namespace torch {
namespace lazy {

// Minimal leaf node used to exercise the backend-agnostic Node base class.
class TestLeafNode : public Node {
 public:
  explicit TestLeafNode(size_t param)
      : Node(
            OpKind(),
            /* num_outputs */ 1,
            /* hash_func */
            [&](bool /*bakeInSizes*/) -> hash_t { return Hash(param); }),
        param_(param) {}
  ~TestLeafNode() override = default;

  // Leaf nodes have no operands, so both accessors deliberately assert.
  const std::vector<Output>& operands() const override {
    TORCH_INTERNAL_ASSERT(false, "Can't access operands of leaf node");
  }
  const Output& operand(size_t i) const override {
    TORCH_INTERNAL_ASSERT(false, "Can't access operand[i] of leaf node");
  }

 private:
  size_t param_;
};

TEST(IrTest, BasicTest) {
  // Nodes built from different parameters should hash differently.
  NodePtr node1 = MakeNode<TestLeafNode>(1);
  NodePtr node2 = MakeNode<TestLeafNode>(2);
  EXPECT_NE(node1->hash(), node2->hash());
  EXPECT_EQ(node1->num_outputs(), 1);

  // NodeCast succeeds when both the node type and the OpKind match.
  const TestLeafNode* leafptr = NodeCast<TestLeafNode>(node1.get(), OpKind());
  EXPECT_TRUE(leafptr != nullptr);
}

TEST(IrTest, MetaDataTest) {
  bool restore_FLAGS_torch_lazy_ir_debug = FLAGS_torch_lazy_ir_debug;

  // With IR debugging off, neither scope nor frame info is recorded.
  FLAGS_torch_lazy_ir_debug = false;
  NodePtr node = MakeNode<TestLeafNode>(1);
  auto metaWithoutDebug = node->metadata();
  EXPECT_EQ(metaWithoutDebug.scope.size(), 0);
  EXPECT_EQ(metaWithoutDebug.frame_info.size(), 0);

  // With debugging on but no scope pushed, metadata is still empty.
  FLAGS_torch_lazy_ir_debug = true;
  node = MakeNode<TestLeafNode>(1);
  auto metaWithEmptyDebug = node->metadata();
  EXPECT_EQ(metaWithEmptyDebug.scope.size(), 0);
  EXPECT_EQ(metaWithEmptyDebug.frame_info.size(), 0);

  {
    // A pushed scope is recorded, with an instance counter appended.
    ScopePusher scope("TestScope");
    node = MakeNode<TestLeafNode>(1);
    auto metaWithScope = node->metadata();
    EXPECT_EQ(metaWithScope.scope, "TestScope.1");
    EXPECT_EQ(metaWithScope.frame_info.size(), 0);
  }

  // A registered frame-info getter populates frame_info on new nodes.
  SourceLocation dummySourceLocation;
  dummySourceLocation.file = "file";
  dummySourceLocation.function = "function";
  dummySourceLocation.line = 10;
  RegisterGetFrameInfo(
      [&]() -> std::vector<SourceLocation> { return {dummySourceLocation}; });
  node = MakeNode<TestLeafNode>(1);
  auto metaWithSourceLoc = node->metadata();
  EXPECT_EQ(metaWithSourceLoc.scope.size(), 0);
  EXPECT_EQ(metaWithSourceLoc.frame_info.size(), 1);
  EXPECT_EQ(metaWithSourceLoc.frame_info[0].file, "file");
  EXPECT_EQ(metaWithSourceLoc.frame_info[0].function, "function");
  EXPECT_EQ(metaWithSourceLoc.frame_info[0].line, 10);

  FLAGS_torch_lazy_ir_debug = restore_FLAGS_torch_lazy_ir_debug;
}

TEST(IrTest, TsNode) {
  // Two TsNodes built from identical inputs should hash identically.
  NodePtr node1 = MakeNode<TsNode>(
      OpKind(at::aten::view),
      Shape(),
      /* num_outputs */ 1,
      /* hash_seed */ kHashSeed);
  NodePtr node2 = MakeNode<TsNode>(
      OpKind(at::aten::view),
      Shape(),
      /* num_outputs */ 1,
      /* hash_seed */ kHashSeed);
  EXPECT_EQ(node1->hash(), node2->hash());
  EXPECT_EQ(node1->num_outputs(), 1);

  const TsNode* leafptr = NodeCast<TsNode>(node1.get(), OpKind(at::aten::view));
  EXPECT_TRUE(leafptr != nullptr);
}
} // namespace lazy
} // namespace torch
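
For reference, these tests can typically be run with the standalone gtest binary that a standard CMake build of PyTorch produces for test/cpp/lazy (the binary name test_lazy is assumed from that build layout):

    ./build/bin/test_lazy --gtest_filter='IrTest.*'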