pytorch/test/cpp/lazy/test_trie_cache.cpp
Bin Bao ac37ddc795 [LT] Store OpKind for each IR subclass in a static field
Summary: Currently OpKind is stored as an object field called op_ on each IR
node, and one use of op_ is to avoid a dynamic_cast in NodeCast when we
need to downcast a base-node pointer to a concrete sub-node pointer.
As a result, we have to construct and pass in an op every time we downcast
a node, which becomes quite annoying once we start implementing
trie-based IR node reuse. More importantly, the op for each subclass is
unique to that subclass, so making it a const static field is the more
logical design.

In this PR, we still keep the object-level op_ to ease XLA adoption. As
future work, we can come back to remove op_, make the op() method
virtual, and get rid of OpKind in all the node constructors.
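
To illustrate the pattern, here is a minimal, self-contained sketch of how a
per-subclass const static OpKind lets a NodeCast-style helper check the kind on
its own, so callers no longer construct and pass an op. The OpKind, Node, and
ScalarAdd types below are simplified stand-ins and do not mirror the actual
torch::lazy declarations.

// Sketch only: stand-in types, not the real torch::lazy signatures.
#include <cassert>
#include <string>
#include <utility>

struct OpKind {
  std::string name;
  bool operator==(const OpKind& other) const { return name == other.name; }
};

class Node {
 public:
  explicit Node(OpKind op) : op_(std::move(op)) {}
  virtual ~Node() = default;
  const OpKind& op() const { return op_; }

 private:
  OpKind op_; // object-level op_, kept for now as described above
};

class ScalarAdd : public Node {
 public:
  // The op is unique to the subclass, so it lives in a const static field.
  static const OpKind class_op_kind;
  ScalarAdd() : Node(class_op_kind) {}
};
const OpKind ScalarAdd::class_op_kind = OpKind{"lazy::scalar_add"};

// Before: every caller constructs and passes the op explicitly.
template <typename T>
const T* NodeCastWithOp(const Node* node, const OpKind& op) {
  return node->op() == op ? static_cast<const T*>(node) : nullptr;
}

// After: the static field supplies the op, so the extra argument goes away.
template <typename T>
const T* NodeCast(const Node* node) {
  return node->op() == T::class_op_kind ? static_cast<const T*>(node) : nullptr;
}

int main() {
  ScalarAdd add;
  const Node* base = &add;
  assert(NodeCastWithOp<ScalarAdd>(base, ScalarAdd::class_op_kind) == &add);
  assert(NodeCast<ScalarAdd>(base) == &add);
  return 0;
}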

Pull Request resolved: https://github.com/pytorch/pytorch/pull/76711

Approved by: https://github.com/wconstab, https://github.com/JackCaoG
2022-05-06 19:14:46 +00:00

#include <gtest/gtest.h>

#include <c10/util/Exception.h>
#include <torch/csrc/lazy/core/config.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <torch/csrc/lazy/core/ir_util.h>

#include <memory>

namespace torch {
namespace lazy {
class TrieCacheNode : public Node {
 public:
  static const OpKind class_op_kind;

  explicit TrieCacheNode(size_t id)
      : Node(class_op_kind, /* num_outputs */ 1), id_(id), hash_(Hash(id_)) {}
  ~TrieCacheNode() override = default;

  bool Equal(size_t id) const {
    return (id_ == id);
  }

  void AddOperand(Value v) {
    if (!v.node) {
      return;
    }
    operands_as_outputs_.emplace_back(v.node.get(), v.index);
    operands_.push_back(std::move(v.node));
  }

  hash_t hash() const override { return hash_; }
  hash_t shapeHash() const override { return hash_; }

 private:
  size_t id_;
  hash_t hash_;
};

const OpKind TrieCacheNode::class_op_kind = OpKind();
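
// The tests below exercise IR node reuse with FLAGS_torch_lazy_reuse_ir enabled.
// As the expectations suggest (behavior inferred from this test, not a spec):
// MakeNode/ReuseOrMakeNode record nodes in the TrieCache, ResetCurrent() rewinds
// the cache to its root as a MarkStep would, and a ReuseOrMakeNode call that
// matches the next cached node returns the existing NodePtr instead of creating
// a new node.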
TEST(TrieCacheTest, TestSinglePath) {
  FLAGS_torch_lazy_reuse_ir = true;
  TrieCache::Get()->Clear();

  NodePtr a = MakeNode<TrieCacheNode>(0);
  NodePtr b = MakeNode<TrieCacheNode>(1);
  NodePtr c = MakeNode<TrieCacheNode>(2);
  TrieCache::Get()->ResetCurrent(); // MarkStep

  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(0).get(), a.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(1).get(), b.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(2).get(), c.get());
  TrieCache::Get()->ResetCurrent(); // MarkStep
}
/*
 *    0
 *    |
 *    1
 *   / \
 *  2   3
 */
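// TestTwoPaths first caches the chain 0 -> 1 -> 2, then replays 0 -> 1 but
// diverges to id 3, which must create a new node (d != c). The remaining steps
// check that both branches, 0 -> 1 -> 3 and 0 -> 1 -> 2, are afterwards served
// from the cache.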
TEST(TrieCacheTest, TestTwoPaths) {
  FLAGS_torch_lazy_reuse_ir = true;
  TrieCache::Get()->Clear();

  NodePtr a = MakeNode<TrieCacheNode>(0);
  NodePtr b = MakeNode<TrieCacheNode>(1);
  NodePtr c = MakeNode<TrieCacheNode>(2);
  TrieCache::Get()->ResetCurrent(); // MarkStep

  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(0).get(), a.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(1).get(), b.get());
  NodePtr d = ReuseOrMakeNode<TrieCacheNode>(3);
  EXPECT_NE(d.get(), c.get());
  TrieCache::Get()->ResetCurrent(); // MarkStep

  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(0).get(), a.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(1).get(), b.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(3).get(), d.get());
  TrieCache::Get()->ResetCurrent(); // MarkStep

  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(0).get(), a.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(1).get(), b.get());
  EXPECT_EQ(ReuseOrMakeNode<TrieCacheNode>(2).get(), c.get());
  TrieCache::Get()->ResetCurrent(); // MarkStep
}
} // namespace lazy
} // namespace torch