mirror of https://github.com/zebrajr/pytorch.git — synced 2025-12-06 12:20:52 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/67927 BackendData - represents 'tensor data' in opaque backend storage. LoweringContext - interface for performing backend-specific IR lowering. BackendImplInterface - interface for lazy tensor backends to implement. Reorganizes backend-related files into the lazy/backend subdirectory; includes a few small fixes which were made on lazy_tensor_staging but need to be back-ported to master. Test Plan: used by lazy_tensor_staging branch. Reviewed By: desertfire Differential Revision: D32142032 fbshipit-source-id: 828c717bcd0d511876e64ad209b50f7bfb10cec5
84 lines · 2.1 KiB · C++
#include <gtest/gtest.h>
|
|
|
|
#include <c10/util/Exception.h>
|
|
#include <torch/csrc/lazy/core/config.h>
|
|
#include <torch/csrc/lazy/core/ir.h>
|
|
#include <torch/csrc/lazy/core/ir_metadata.h>
|
|
#include <torch/csrc/lazy/core/ir_util.h>
|
|
|
|
namespace torch {
|
|
namespace lazy {
|
|
|
|
class IrUtilNode : public Node {
|
|
public:
|
|
explicit IrUtilNode()
|
|
: Node(OpKind(), /* num_outputs */ 1, /* hash_seed */ Hash(0)) {}
|
|
~IrUtilNode() override = default;
|
|
|
|
void AddOperand(Value v) {
|
|
if (!v.node) {
|
|
return;
|
|
}
|
|
operands_as_outputs_.emplace_back(v.node.get(), v.index);
|
|
operands_.push_back(std::move(v.node));
|
|
}
|
|
|
|
const std::vector<Output>& operands() const override {
|
|
return operands_as_outputs_;
|
|
}
|
|
|
|
const Output& operand(size_t i) const override {
|
|
return operands_as_outputs_.at(i);
|
|
}
|
|
|
|
private:
|
|
std::vector<NodePtr> operands_;
|
|
std::vector<Output> operands_as_outputs_;
|
|
};
|
|
|
|
/*     a
 *    / \
 *   b   c
 *    \ /
 *     d
 * Post-order: d c b a
 */
|
|
// Builds the diamond graph a -> {b, c} -> d and verifies that
// ComputePostOrder emits children before parents: d, c, b, a.
TEST(IrUtilTest, BasicTest) {
  NodePtr a = MakeNode<IrUtilNode>();
  NodePtr b = MakeNode<IrUtilNode>();
  NodePtr c = MakeNode<IrUtilNode>();
  NodePtr d = MakeNode<IrUtilNode>();

  // Helper: make `child` (at output `index`) an operand of `parent`.
  auto add_edge = [](const NodePtr& parent, const NodePtr& child, size_t index) {
    dynamic_cast<IrUtilNode*>(parent.get())->AddOperand(Value(child, index));
  };
  add_edge(a, b, 0);
  add_edge(a, c, 1);
  add_edge(b, d, 0);
  add_edge(c, d, 0);

  std::vector<Node*> postorder = Util::ComputePostOrder({a.get()});
  const std::vector<Node*> expected{d.get(), c.get(), b.get(), a.get()};
  EXPECT_EQ(postorder.size(), expected.size());
  for (size_t i = 0; i < expected.size(); ++i) {
    EXPECT_EQ(postorder.at(i), expected.at(i));
  }
}
|
|
|
|
/*   a
 *  / \
 * b---c
 * Post-order: not valid (the graph contains a cycle)
 */
|
|
// Builds the cycle a -> b -> c -> a and verifies that ComputePostOrder
// detects it and throws (no valid post-order exists for a cyclic graph).
TEST(IrUtilTest, TestCircle) {
  NodePtr a = MakeNode<IrUtilNode>();
  NodePtr b = MakeNode<IrUtilNode>();
  NodePtr c = MakeNode<IrUtilNode>();

  // Helper: add `to` (output 0) as an operand of `from`.
  auto link = [](const NodePtr& from, const NodePtr& to) {
    dynamic_cast<IrUtilNode*>(from.get())->AddOperand(Value(to, 0));
  };
  link(a, b);
  link(b, c);
  link(c, a);

  EXPECT_THROW(Util::ComputePostOrder({a.get()}), c10::Error);
}
|
|
|
|
} // namespace lazy
|
|
} // namespace torch
|