Move custom passes to last optimization step (#29256)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/29256

..

Test Plan: ..

Reviewed By: ZolotukhinM

Differential Revision: D18340212

fbshipit-source-id: 30f4850c8a21bdab42c7cf04b4b92b1787449ee2
Bram Wasti 2019-11-05 20:08:58 -08:00 committed by Facebook Github Bot
parent 6ea4219d20
commit ee21142e40


@@ -713,10 +713,6 @@ bool needsGradient(const std::shared_ptr<const Graph>& graph) {
 }
 
 void runNondiffOptimization(std::shared_ptr<Graph>& graph) {
-  // run custom passes that different backends can register
-  for (const auto& pass : getCustomPasses()) {
-    pass(graph);
-  }
   // decomposition pass, decompose certain ops that will be used in the
   // following passes (like batchmm and jit fusion)
   DecomposeOps(graph);
@@ -732,6 +728,12 @@ void runNondiffOptimization(std::shared_ptr<Graph>& graph) {
   QuantFusion(graph);
 
   FuseGraph(graph);
+
+  // Run custom passes that different backends can register.
+  // This is done last to give internal optimization passes priority.
+  for (const auto& pass : getCustomPasses()) {
+    pass(graph);
+  }
 }
 
 void runOptimization(std::shared_ptr<Graph>& graph) {
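For context, the list iterated by getCustomPasses() is populated by backends through static registration. Below is a minimal sketch of such a registration, assuming the RegisterPass helper that accompanies getCustomPasses() (pass_manager.h around the time of this commit); the header paths and the pass body (myBackendPass) are illustrative, not part of this change. After this commit, a pass registered this way sees the graph only after the built-in non-differentiable optimizations (DecomposeOps, BatchMM, QuantFusion, FuseGraph) have already run.

#include <torch/csrc/jit/pass_manager.h> // assumed location of RegisterPass/getCustomPasses
#include <torch/csrc/jit/ir.h>

namespace {

// Illustrative backend pass: it only counts nodes. With this change, the
// graph it receives has already been through the JIT's own optimization
// passes, so a real backend could match against fused/optimized nodes.
void myBackendPass(std::shared_ptr<torch::jit::Graph>& graph) {
  size_t num_nodes = 0;
  for (auto* node : graph->nodes()) {
    (void)node;
    ++num_nodes;
  }
  // A real backend would rewrite or lower nodes to backend-specific ops here.
}

// Static registration appends the pass to the list returned by
// getCustomPasses(), which runNondiffOptimization() now walks last.
static torch::jit::RegisterPass reg(myBackendPass);

} // namespace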