[AutoAccept][Codemod][FBSourceClangFormatLinter] Daily arc lint --take CLANGFORMAT

Reviewed By: zertosh

Differential Revision: D28319469

fbshipit-source-id: 8295597a8ee16b2fef3f7aacdd6c892cb22db988
This commit is contained in:
CodemodService FBSourceClangFormatLinterBot 2021-05-10 03:37:55 -07:00 committed by Facebook GitHub Bot
parent b84a28b50a
commit cbfce376a8
11 changed files with 128 additions and 351 deletions

View File

@ -119,8 +119,7 @@ TEST(StringViewTest, testCopyAssignment) {
static_assert(5 == (string_view() = "hello").size(), "");
static_assert(
string_equal("hello", (string_view() = "hello").data(), 5),
"");
string_equal("hello", (string_view() = "hello").data(), 5), "");
}
#endif
const string_view hello = assign("hello");

View File

@ -240,8 +240,7 @@ TEST(LiteTrainerTest, SequentialSampler) {
const int kBatchSize = 10;
auto data_loader =
torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
DummyDataset(25),
kBatchSize);
DummyDataset(25), kBatchSize);
int i = 1;
for (const auto& batch : *data_loader) {
for (const auto& example : batch) {

View File

@ -402,13 +402,9 @@ TEST(BoundsInference, MultipleTopLoopLoad) {
Tensor* b =
Compute("b", {{64, "x"}}, [&](const VarHandle& x) { return a.load(x); });
Tensor* c = Compute(
"c",
{{32, "x"}},
[&](const VarHandle& x) { return a.load(x + 10); });
"c", {{32, "x"}}, [&](const VarHandle& x) { return a.load(x + 10); });
Tensor* d = Compute(
"d",
{{96, "x"}},
[&](const VarHandle& x) { return a.load(x + 2); });
"d", {{96, "x"}}, [&](const VarHandle& x) { return a.load(x + 2); });
LoopNest l({b, c, d});
auto bounds_info = inferBounds(l.root_stmt());
@ -522,19 +518,15 @@ TEST(BoundsInference, CacheReads) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 30, j + 3);
});
Tensor* C = Compute(
"C",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"C", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 10, j + 20) + A->load(i + 30, j + 40);
});
@ -674,13 +666,13 @@ TEST(BoundsInference, GetPotentialHazardsLoopNoHazard) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return (i + 1) * (j + 1); });
"B", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return (i + 1) * (j + 1);
});
LoopNest l({A, B});
@ -703,13 +695,11 @@ TEST(BoundsInference, GetPotentialHazardsLoopCall) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{64, "i"}, {64, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{64, "i"}, {64, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i, j) + 5;
});
@ -733,9 +723,9 @@ TEST(BoundsInference, GetPotentialHazardsLoopSplit) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
LoopNest l({A});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@ -1000,9 +990,7 @@ TEST(BoundsInference, HasConflictingOverlapDueToRAWDependence) {
0,
100,
Store::make(
b_buf,
{k},
Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
b_buf, {k}, Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
auto par = Block::make({forJ, forK});
tensorexpr::analysis::MemDependencyChecker analyzer;
@ -1031,9 +1019,7 @@ TEST(BoundsInference, HasConflictingOverlapDueToWARDependence) {
0,
100,
Store::make(
b_buf,
{k},
Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
b_buf, {k}, Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j)));
auto par = Block::make({forK, forJ});
@ -1064,9 +1050,7 @@ TEST(BoundsInference, HasConflictingOverlapWithLoads) {
10,
100,
Store::make(
b_buf,
{k},
Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
b_buf, {k}, Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
auto forJ = For::make(
j,
10,
@ -1104,10 +1088,7 @@ TEST(BoundsInference, IsOverlapping) {
auto storeA2 = Store::make(a_buf, {i + 50}, i * 50);
auto storeA3 = Store::make(a_buf, {i + 150}, i * 150);
auto forI = For::make(
i,
0,
100,
Block::make({storeA1, storeB, storeC, storeA2, storeA3}));
i, 0, 100, Block::make({storeA1, storeB, storeC, storeA2, storeA3}));
tensorexpr::analysis::MemDependencyChecker analyzer;
forI->accept(&analyzer);
ASSERT_TRUE(

View File

@ -36,9 +36,7 @@ TEST(CppPrinter, AllocateOnStackThenFree) {
TEST(CppPrinter, AllocateOnHeapThenFree) {
KernelScope kernel_scope;
std::vector<const Expr*> dims = {
new IntImm(20),
new IntImm(50),
new IntImm(3)};
new IntImm(20), new IntImm(50), new IntImm(3)};
const Buf* buf = new Buf("y", dims, kLong);
Allocate* alloc = new Allocate(buf);
Free* free = new Free(buf);

View File

@ -509,15 +509,11 @@ TEST(ExternalCall, Inlining) {
BufHandle MatmulResultBuf("MatmulResult", {8, 8}, kFloat);
Tensor* A = Compute(
"A",
{{8, "i"}, {8, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"A", {{8, "i"}, {8, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return FloatImm::make(5.0f);
});
Tensor* B = Compute(
"B",
{{8, "i"}, {8, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{8, "i"}, {8, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return FloatImm::make(4.0f);
});
Tensor* MatmulResult = new Tensor(

View File

@ -32,9 +32,7 @@ void checkIR(Stmt* s, const std::string& pattern) {
TEST(LoopNest, ExprSimple01) {
KernelScope kernel_scope;
Tensor* tensor = Compute(
"f",
{{16, "X"}, {5, "y"}},
[](const VarHandle& x, const VarHandle& y) {
"f", {{16, "X"}, {5, "y"}}, [](const VarHandle& x, const VarHandle& y) {
return ExprHandle(1.0f) + cast<float>(x) * x + cast<float>(y) * y;
});
LoopNest l({tensor});
@ -54,9 +52,7 @@ TEST(LoopNest, ExprSimple01) {
TEST(LoopNest, ExprLower01) {
KernelScope kernel_scope;
Tensor* tensor = Compute(
"f",
{{16, "x"}, {5, "y"}},
[](const VarHandle& x, const VarHandle& y) {
"f", {{16, "x"}, {5, "y"}}, [](const VarHandle& x, const VarHandle& y) {
return ExprHandle(1.0f) + cast<float>(x) * x + cast<float>(y) * y;
});
LoopNest l({tensor});
@ -2682,9 +2678,7 @@ TEST(LoopNest, LoopNestReorderInternalLoopNest) {
TEST(LoopNest, OuterLoopVectorization) {
KernelScope kernel_scope;
Tensor* tensor = Compute(
"f",
{{8, "X"}, {8, "y"}},
[](const VarHandle& x, const VarHandle& y) {
"f", {{8, "X"}, {8, "y"}}, [](const VarHandle& x, const VarHandle& y) {
return ExprHandle(1.0f) + cast<float>(x) * x + cast<float>(y) * y;
});
LoopNest l({tensor});
@ -3374,10 +3368,7 @@ TEST(LoopNest, FlattenImperfectLoopNest) {
auto for_body = Block::make({Store::make(a_buf, {i, j}, i * j)});
auto inner_for = For::make(j, 0, 15, for_body);
auto outer_for = For::make(
i,
0,
10,
Block::make({Store::make(a_buf, {i, i}, 0), inner_for}));
i, 0, 10, Block::make({Store::make(a_buf, {i, i}, 0), inner_for}));
auto par = Block::make({outer_for});
HashProvider hasher;
auto hash_before = hasher.hash(par);
@ -3510,19 +3501,15 @@ TEST(LoopNest, CacheReadsSimple) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 30, j + 3);
});
Tensor* C = Compute(
"C",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"C", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 10, j + 20) + A->load(i + 30, j + 40);
});
@ -3583,19 +3570,15 @@ TEST(LoopNest, CacheReadsOuter) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 30, j + 40) + A->load(i + 31, j + 41);
});
Tensor* C = Compute(
"C",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"C", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 10, j + 20) + A->load(i + 30, j + 40);
});
@ -3636,19 +3619,15 @@ TEST(LoopNest, CacheReadsInternal) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 30, j + 40) + A->load(i + 31, j + 41);
});
Tensor* C = Compute(
"C",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"C", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 10, j + 20) + A->load(i + 30, j + 40);
});
@ -3688,20 +3667,16 @@ TEST(LoopNest, CacheReadsInner) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
// Note: I'm changing the offset of the first arg of the first call to A.
Tensor* B = Compute(
"B",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 34, j + 40) + A->load(i + 30, j + 41);
});
Tensor* C = Compute(
"C",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"C", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 10, j + 20) + A->load(i + 30, j + 40);
});
@ -3741,19 +3716,15 @@ TEST(LoopNest, CacheWritesSimple) {
KernelScope kernel_scope;
Tensor* A = Compute(
"A",
{{64, "i"}, {64, "j"}},
[](const VarHandle& i, const VarHandle& j) { return i * j; });
"A", {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) {
return i * j;
});
Tensor* B = Compute(
"B",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 30, j + 40) + A->load(i + 31, j + 41);
});
Tensor* C = Compute(
"C",
{{20, "i"}, {10, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"C", {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i + 10, j + 20) + A->load(i + 30, j + 40);
});
@ -3966,9 +3937,7 @@ TEST(LoopNest, CompoundTensorUsed) {
Tensor* A = new Tensor(a_buf.node(), body);
Tensor* B = Compute(
"B",
{{10, "i"}, {3, "j"}},
[&](const VarHandle& i, const VarHandle& j) {
"B", {{10, "i"}, {3, "j"}}, [&](const VarHandle& i, const VarHandle& j) {
return A->load(i, j + 1) + A->load(i, j + 2);
});
@ -4217,9 +4186,7 @@ TEST(LoopNest, VectorizeUse) {
Tensor* b = Compute(
"b", {{N, "n"}}, [&](const VarHandle& n) { return a.load(n) + 1.0f; });
Tensor* c = Compute(
"c",
{{N, "n"}},
[&](const VarHandle& n) { return b->load(n) + 2.0f; });
"c", {{N, "n"}}, [&](const VarHandle& n) { return b->load(n) + 2.0f; });
LoopNest nest({c}, {b, c});
auto loops = nest.getAllLoopNestsWritingToBuf(b->buf())[0];
nest.vectorize(loops[0]);
@ -4748,15 +4715,9 @@ TEST(LoopNest, fuseLoopsNested2DInner) {
VarHandle j("j", kInt);
VarHandle n("n", kInt);
auto forJ = For::make(
j,
0,
100,
Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)));
j, 0, 100, Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)));
auto forN = For::make(
n,
0,
100,
Store::make(b_buf, {i, n}, Add::make(i, Mul::make(n, 100))));
n, 0, 100, Store::make(b_buf, {i, n}, Add::make(i, Mul::make(n, 100))));
auto forI = For::make(i, 0, 20, Block::make({forJ, forN}));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
For* fused_loop;
@ -5295,10 +5256,7 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies5) {
VarHandle j("j", kInt);
VarHandle n("n", kInt);
auto forJ = For::make(
j,
0,
100,
Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)));
j, 0, 100, Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)));
auto forN = For::make(
n,
0,
@ -5332,9 +5290,7 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies6) {
0,
100,
Store::make(
b_buf,
{k},
Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
b_buf, {k}, Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@ -5362,9 +5318,7 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies7) {
0,
100,
Store::make(
b_buf,
{k},
Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
b_buf, {k}, Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forK, forJ});

View File

@ -785,10 +785,7 @@ TEST(MemDependency, MemDependencyCheckerLoopBounds) {
std::vector<Stmt*> stmts(
{For::make(x, 1, 10, Store::make(b, {x}, Load::make(a, {x}))),
For::make(
x,
1,
9,
Store::make(b, {x}, Mul::make(Load::make(b, {x}), 2))),
x, 1, 9, Store::make(b, {x}, Mul::make(Load::make(b, {x}), 2))),
For::make(x, 3, 4, Store::make(c, {x}, Load::make(a, {x}))),
For::make(x, 0, 10, Store::make(c, {x}, Load::make(b, {x})))});
@ -977,14 +974,9 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsIndexShift) {
0,
9,
Store::make(
a,
{ExprHandle(9) - x},
Load::make(a, {ExprHandle(8) - x}))),
a, {ExprHandle(9) - x}, Load::make(a, {ExprHandle(8) - x}))),
For::make(
x,
0,
10,
Store::make(a, {x}, Load::make(a, {ExprHandle(9) - x}))),
x, 0, 10, Store::make(a, {x}, Load::make(a, {ExprHandle(9) - x}))),
For::make(x, 0, 10, Store::make(b, {x}, Load::make(a, {x})))});
stmt->accept(&analyzer);
@ -1326,9 +1318,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
3,
10,
Store::make(
a,
{ExprHandle(9) - x},
Load::make(a, {ExprHandle(8) - x})));
a, {ExprHandle(9) - x}, Load::make(a, {ExprHandle(8) - x})));
stmt->accept(&analyzer);
// However here we can determine the A store is earlier in the order than
@ -1352,9 +1342,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
3,
10,
Store::make(
a,
{ExprHandle(8) - x},
Load::make(a, {ExprHandle(9) - x})));
a, {ExprHandle(8) - x}, Load::make(a, {ExprHandle(9) - x})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1375,9 +1363,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
3,
10,
Store::make(
a,
{ExprHandle(9) - x},
Load::make(a, {ExprHandle(8) - x})));
a, {ExprHandle(9) - x}, Load::make(a, {ExprHandle(8) - x})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1438,10 +1424,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
// distinct.
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 2 + 1})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 1})));
stmt->accept(&analyzer);
ASSERT_FALSE(isSelfDependent(analyzer.getHistory()));
@ -1457,10 +1440,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
1,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 2 - 1})));
x, 1, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 - 1})));
stmt->accept(&analyzer);
ASSERT_FALSE(isSelfDependent(analyzer.getHistory()));
@ -1476,10 +1456,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 2 + 2})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 2})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1495,10 +1472,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
1,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 2 - 2})));
x, 1, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 - 2})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1514,10 +1488,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
// of stride.
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 2 + 7})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 7})));
stmt->accept(&analyzer);
ASSERT_FALSE(isSelfDependent(analyzer.getHistory()));
@ -1532,10 +1503,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
// Works with offsets which are multiples of the stride.
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 2 + 4})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 4})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1552,10 +1520,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 6}, Load::make(a, {x * 6 + 5})));
x, 0, 10, Store::make(a, {x * 6}, Load::make(a, {x * 6 + 5})));
stmt->accept(&analyzer);
ASSERT_FALSE(isSelfDependent(analyzer.getHistory()));
@ -1605,10 +1570,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 6 + 1})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 6 + 1})));
stmt->accept(&analyzer);
ASSERT_FALSE(isSelfDependent(analyzer.getHistory()));
@ -1624,10 +1586,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 6 + 4})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 6 + 4})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1643,10 +1602,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2 + 3}, Load::make(a, {x * 6})));
x, 0, 10, Store::make(a, {x * 2 + 3}, Load::make(a, {x * 6})));
stmt->accept(&analyzer);
ASSERT_FALSE(isSelfDependent(analyzer.getHistory()));
@ -1661,10 +1617,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
// If they have strides with no common multiple > 1, they overlap.
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x * 2}, Load::make(a, {x * 3 + 1})));
x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 3 + 1})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1695,10 +1648,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
// If they have different execution orders they may overlap.
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x}, Load::make(a, {ExprHandle(9) - x})));
x, 0, 10, Store::make(a, {x}, Load::make(a, {ExprHandle(9) - x})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1747,10 +1697,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) {
// If the stride is not monotonic, they overlap - even with an offset.
MemDependencyChecker analyzer;
Stmt* stmt = For::make(
x,
0,
10,
Store::make(a, {x / 2}, Load::make(a, {x / 2 + 1})));
x, 0, 10, Store::make(a, {x / 2}, Load::make(a, {x / 2 + 1})));
stmt->accept(&analyzer);
ASSERT_TRUE(isSelfDependent(analyzer.getHistory()));
@ -1819,10 +1766,7 @@ TEST(MemDependency, MemDependencyCheckerLoopDistinctStrides) {
MemDependencyChecker analyzer({a.node()}, {b.node()});
Stmt* stmt = Block::make(
{For::make(
x,
0,
10,
Store::make(b, {x * 2 + 1}, Load::make(a, {x * 2 + 1}))),
x, 0, 10, Store::make(b, {x * 2 + 1}, Load::make(a, {x * 2 + 1}))),
For::make(x, 0, 10, Store::make(b, {x * 2}, Load::make(a, {x * 2})))
});
@ -2050,9 +1994,7 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) {
{For::make(x, 0, 10, initStore),
Cond::make(
CompareSelect::make(
conditionalLoad,
5,
CompareSelectOperation::kLT),
conditionalLoad, 5, CompareSelectOperation::kLT),
Store::make(c, {0}, 5),
nullptr)});
@ -2368,10 +2310,7 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) {
*/
MemDependencyChecker analyzer({a, b}, {c});
Stmt* stmt = Block::make({For::make(
x,
0,
10,
Store::make(c, {x}, Load::make(a, {Load::make(b, {x})})))});
x, 0, 10, Store::make(c, {x}, Load::make(a, {Load::make(b, {x})})))});
stmt->accept(&analyzer);
@ -2416,10 +2355,7 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) {
*/
MemDependencyChecker analyzer({a, b}, {c});
Stmt* stmt = Block::make({For::make(
x,
0,
10,
Store::make(c, {Load::make(b, {x})}, Load::make(a, {x})))});
x, 0, 10, Store::make(c, {Load::make(b, {x})}, Load::make(a, {x})))});
stmt->accept(&analyzer);
@ -2463,10 +2399,7 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) {
*/
MemDependencyChecker analyzer({a, b}, {c});
Stmt* stmt = Block::make({For::make(
x,
0,
10,
Store::make(c, {Load::make(b, {Load::make(a, {x})})}, x))});
x, 0, 10, Store::make(c, {Load::make(b, {Load::make(a, {x})})}, x))});
stmt->accept(&analyzer);

View File

@ -693,10 +693,7 @@ TEST(Registerizer, RegisterizerNoLoads) {
Stmt* stmt = Block::make(
{Store::make(a, {0}, 0),
For::make(
x,
0,
10,
Block::make({Store::make(a, {0}, Add::make(x, 1))}))});
x, 0, 10, Block::make({Store::make(a, {0}, Add::make(x, 1))}))});
/*
* A[0] = 0;
@ -1463,9 +1460,7 @@ TEST(Registerizer, RegisterizerCondCondition) {
Store::make(c, {x}, Load::make(a, {x})),
Cond::make(
CompareSelect::make(
Load::make(a, {x}),
5,
CompareSelectOperation::kLT),
Load::make(a, {x}), 5, CompareSelectOperation::kLT),
Store::make(c, {x}, Add::make(Load::make(c, {x}), 1)),
nullptr)});
@ -1793,9 +1788,7 @@ TEST(Registerizer, RegisterizerIfThenElseCondition) {
{x},
IfThenElse::make(
CompareSelect::make(
Load::make(a, {x}),
5,
CompareSelectOperation::kLT),
Load::make(a, {x}), 5, CompareSelectOperation::kLT),
Load::make(b, {0}),
Load::make(c, {0})))});
@ -1840,9 +1833,7 @@ TEST(Registerizer, RegisterizerIfThenElseConditionUnhidden) {
{x},
IfThenElse::make(
CompareSelect::make(
Load::make(a, {x}),
5,
CompareSelectOperation::kLT),
Load::make(a, {x}), 5, CompareSelectOperation::kLT),
Add::make(Load::make(a, {x}), 1),
Add::make(Load::make(a, {x}), 10)))});
@ -1934,9 +1925,7 @@ TEST(Registerizer, RegisterizerCondIfThenElse) {
CompareSelect::make(
IfThenElse::make(
CompareSelect::make(
Load::make(a, {x}),
5,
CompareSelectOperation::kLT),
Load::make(a, {x}), 5, CompareSelectOperation::kLT),
Load::make(a, {x}),
Load::make(b, {x})),
x,
@ -2192,16 +2181,10 @@ TEST(Registerizer, RegisterizerPartialInside) {
Stmt* stmt = Block::make(
{Store::make(a, {0}, 2),
For::make(
x1,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), x1))),
x1, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x1))),
For::make(x2, 1, 10, Store::make(a, {x2}, Load::make(a, {x2 - 1}))),
For::make(
x3,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), x3)))});
x3, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x3)))});
/*
* A[0] = 2;
@ -2267,19 +2250,13 @@ TEST(Registerizer, RegisterizerPartialCondition) {
Stmt* stmt = Block::make(
{Store::make(a, {0}, 2),
For::make(
x,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), x))),
x, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x))),
Cond::make(
CompareSelect::make(x, 5, CompareSelectOperation::kLT),
Store::make(a, {x}, Load::make(a, {x - 1})),
nullptr),
For::make(
x,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), x)))});
x, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x)))});
/*
* A[0] = 2;
@ -3298,18 +3275,12 @@ TEST(Registerizer, RegisterizerTwoConditionalLoops) {
{Cond::make(
CompareSelect::make(x, 5, CompareSelectOperation::kLT),
For::make(
x,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
x, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
nullptr),
Cond::make(
CompareSelect::make(x, 5, CompareSelectOperation::kGT),
For::make(
x,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
x, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
nullptr)});
/*
@ -3377,19 +3348,13 @@ TEST(Registerizer, RegisterizerTwoConditionalLoopsCut) {
{Cond::make(
CompareSelect::make(x, 5, CompareSelectOperation::kLT),
For::make(
x,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
x, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
nullptr),
For::make(x, 0, 10, Store::make(a, {x}, 1)),
Cond::make(
CompareSelect::make(x, 5, CompareSelectOperation::kGT),
For::make(
x,
0,
10,
Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
x, 0, 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))),
nullptr)});
/*

View File

@ -1872,9 +1872,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(5, Max(x, Max(y, Max(z, 8)))) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
5,
Max::make(x, Max::make(y, Max::make(z, 8, true), true), true),
true);
5, Max::make(x, Max::make(y, Max::make(z, 8, true), true), true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -1888,9 +1886,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(8, Max(Max(y, Max(z, 5)), x)) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
8,
Max::make(Max::make(y, Max::make(z, 5, true), true), x, true),
true);
8, Max::make(Max::make(y, Max::make(z, 5, true), true), x, true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -1904,9 +1900,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(5, Max(Max(Max(z, 8), y), x)) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
5,
Max::make(Max::make(Max::make(z, 8, true), y, true), x, true),
true);
5, Max::make(Max::make(Max::make(z, 8, true), y, true), x, true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -1920,9 +1914,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(Max(x, Max(y, Max(5, z))), 8) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
Max::make(x, Max::make(y, Max::make(5, z, true), true), true),
8,
true);
Max::make(x, Max::make(y, Max::make(5, z, true), true), true), 8, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -1936,9 +1928,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(Max(Max(y, Max(8, z)), x), 5) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
Max::make(Max::make(y, Max::make(z, 8, true), true), x, true),
5,
true);
Max::make(Max::make(y, Max::make(z, 8, true), true), x, true), 5, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -1952,9 +1942,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(Max(Max(Max(5, z), y), x), 8) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
Max::make(Max::make(Max::make(z, 5, true), y, true), x, true),
8,
true);
Max::make(Max::make(Max::make(z, 5, true), y, true), x, true), 8, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -1991,9 +1979,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(8, Max(Max(x, 5), Max(y, z))) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
8,
Max::make(Max::make(x, 5, true), Max::make(y, z, true), true),
true);
8, Max::make(Max::make(x, 5, true), Max::make(y, z, true), true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -2007,9 +1993,7 @@ TEST(Simplify, SimplifyNestedMax) {
{
// Max(Max(Max(x, 5), Max(y, z)), 8) => Max(Max(Max(x, 8), y), z)
ExprHandle body = Max::make(
Max::make(Max::make(x, 5, true), Max::make(y, z, true), true),
8,
true);
Max::make(Max::make(x, 5, true), Max::make(y, z, true), true), 8, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Max, simplified.node(), max1);
@ -2193,9 +2177,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(5, Min(x, Min(y, Min(z, 8)))) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
5,
Min::make(x, Min::make(y, Min::make(z, 8, true), true), true),
true);
5, Min::make(x, Min::make(y, Min::make(z, 8, true), true), true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2209,9 +2191,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(5, Min(Min(y, Min(z, 8)), x)) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
5,
Min::make(Min::make(y, Min::make(z, 8, true), true), x, true),
true);
5, Min::make(Min::make(y, Min::make(z, 8, true), true), x, true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2225,9 +2205,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(5, Min(Min(Min(z, 8), y), x)) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
5,
Min::make(Min::make(Min::make(z, 8, true), y, true), x, true),
true);
5, Min::make(Min::make(Min::make(z, 8, true), y, true), x, true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2241,9 +2219,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(Min(x, Min(y, Min(8, z))), 5) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
Min::make(x, Min::make(y, Min::make(8, z, true), true), true),
5,
true);
Min::make(x, Min::make(y, Min::make(8, z, true), true), true), 5, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2257,9 +2233,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(Min(Min(y, Min(8, z)), x), 5) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
Min::make(Min::make(y, Min::make(z, 8, true), true), x, true),
5,
true);
Min::make(Min::make(y, Min::make(z, 8, true), true), x, true), 5, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2273,9 +2247,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(Min(Min(Min(8, z), y), x), 5) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
Min::make(Min::make(Min::make(z, 8, true), y, true), x, true),
5,
true);
Min::make(Min::make(Min::make(z, 8, true), y, true), x, true), 5, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2312,9 +2284,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(8, Min(Min(x, 5), Min(y, z))) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
8,
Min::make(Min::make(x, 5, true), Min::make(y, z, true), true),
true);
8, Min::make(Min::make(x, 5, true), Min::make(y, z, true), true), true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -2328,9 +2298,7 @@ TEST(Simplify, SimplifyNestedMin) {
{
// Min(Min(Min(x, 5), Min(y, z)), 8) => Min(Min(Min(x, 5), y), z)
ExprHandle body = Min::make(
Min::make(Min::make(x, 5, true), Min::make(y, z, true), true),
8,
true);
Min::make(Min::make(x, 5, true), Min::make(y, z, true), true), 8, true);
ExprHandle simplified = IRSimplifier::simplify(body);
IS_NODE_WITH_NAME(Min, simplified.node(), min1);
@ -4038,9 +4006,7 @@ TEST(Simplify, SimplifyReorderForCond) {
4,
Cond::make(
CompareSelect::make(
Load::make(c, {0}),
10,
CompareSelectOperation::kLT),
Load::make(c, {0}), 10, CompareSelectOperation::kLT),
Store::make(c, {0}, Load::make(a, {i})),
nullptr));
@ -4057,9 +4023,7 @@ TEST(Simplify, SimplifyReorderForCond) {
4,
Cond::make(
CompareSelect::make(
Load::make(b, {0}),
10,
CompareSelectOperation::kLT),
Load::make(b, {0}), 10, CompareSelectOperation::kLT),
Store::make(c, {0}, Load::make(a, {i})),
nullptr));
@ -4077,9 +4041,7 @@ TEST(Simplify, SimplifyReorderForCond) {
4,
Cond::make(
CompareSelect::make(
Load::make(a, {0}),
10,
CompareSelectOperation::kLT),
Load::make(a, {0}), 10, CompareSelectOperation::kLT),
Store::make(c, {0}, Load::make(a, {i})),
nullptr));
@ -4117,9 +4079,7 @@ TEST(Simplify, SimplifyReorderForCond) {
4,
Cond::make(
CompareSelect::make(
Load::make(a, {0}),
10,
CompareSelectOperation::kLT),
Load::make(a, {0}), 10, CompareSelectOperation::kLT),
Cond::make(
CompareSelect::make(j, 10, CompareSelectOperation::kEQ),
Store::make(c, {0}, Load::make(a, {i})),
@ -4143,9 +4103,7 @@ TEST(Simplify, SimplifyReorderForCond) {
4,
Cond::make(
CompareSelect::make(
Load::make(a, {0}),
10,
CompareSelectOperation::kLT),
Load::make(a, {0}), 10, CompareSelectOperation::kLT),
Cond::make(
CompareSelect::make(i, 10, CompareSelectOperation::kEQ),
Store::make(c, {0}, Load::make(a, {i})),
@ -4187,9 +4145,7 @@ TEST(Simplify, SimplifyReorderForCond) {
4,
Cond::make(
CompareSelect::make(
Load::make(c, {0}),
10,
CompareSelectOperation::kLT),
Load::make(c, {0}), 10, CompareSelectOperation::kLT),
Store::make(c, {1}, Load::make(a, {i})),
nullptr));

View File

@ -128,8 +128,7 @@ int main(int argc, char* argv[]) {
// First, let's specify the sizes:
std::vector<const Expr*> dims = {
new IntImm(64),
new IntImm(32)}; // IntImm stands for Integer Immediate
new IntImm(64), new IntImm(32)}; // IntImm stands for Integer Immediate
// and represents an integer constant
// Now we can create a Buf object by providing a name, dimensions, and a
@ -422,10 +421,8 @@ int main(int argc, char* argv[]) {
// Let's print one of the elements from each array to verify that the
// computation did happen:
std::cout << "A[10] = " << data_A[10]
<< std::endl
<< "B[10] = " << data_B[10]
<< std::endl
std::cout << "A[10] = " << data_A[10] << std::endl
<< "B[10] = " << data_B[10] << std::endl
<< "X[10] = A[10] + B[10] = " << data_X[10] << std::endl;
// Prints:
// A[10] = 3

View File

@ -800,8 +800,7 @@ void StaticRuntime::benchmark(
const int warmup_runs,
const int main_runs) {
float time_per_iter = benchmark_model(args, kwargs, warmup_runs, main_runs);
std::cout << "Static runtime ms per iter: "
<< time_per_iter
std::cout << "Static runtime ms per iter: " << time_per_iter
<< ". Iters per second: " << 1000.0 / time_per_iter << std::endl;
IndividualMetrics results =