From ee21142e40f15596a1e83c20db79ed6e20c59c93 Mon Sep 17 00:00:00 2001
From: Bram Wasti
Date: Tue, 5 Nov 2019 20:08:58 -0800
Subject: [PATCH] Move custom passes to last optimization step (#29256)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/29256
..

Test Plan: ..

Reviewed By: ZolotukhinM

Differential Revision: D18340212

fbshipit-source-id: 30f4850c8a21bdab42c7cf04b4b92b1787449ee2
---
 torch/csrc/jit/graph_executor.cpp | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/torch/csrc/jit/graph_executor.cpp b/torch/csrc/jit/graph_executor.cpp
index 4abb884d31c..a98b564945c 100644
--- a/torch/csrc/jit/graph_executor.cpp
+++ b/torch/csrc/jit/graph_executor.cpp
@@ -713,10 +713,6 @@ bool needsGradient(const std::shared_ptr<Graph>& graph) {
 }
 
 void runNondiffOptimization(std::shared_ptr<Graph>& graph) {
-  // run custom passes that different backends can register
-  for (const auto& pass : getCustomPasses()) {
-    pass(graph);
-  }
   // decomposition pass, decompose certain ops that will be used in the
   // following passes (like batchmm and jit fusion)
   DecomposeOps(graph);
@@ -732,6 +728,12 @@ void runNondiffOptimization(std::shared_ptr<Graph>& graph) {
   QuantFusion(graph);
 
   FuseGraph(graph);
+
+  // Run custom passes that different backends can register.
+  // This is done last to give internal optimization passes priority.
+  for (const auto& pass : getCustomPasses()) {
+    pass(graph);
+  }
 }
 
 void runOptimization(std::shared_ptr<Graph>& graph) {
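
For context on what this reordering affects, below is a minimal sketch of how a backend might register a custom pass into the getCustomPasses() list that runNondiffOptimization() now iterates as its final step, after the built-in passes such as DecomposeOps, QuantFusion, and FuseGraph. It assumes the Pass alias and the RegisterPass helper declared in torch/csrc/jit/pass_manager.h around this time; the header paths, the countNodesPass name, and the pass body are illustrative assumptions, not part of this change.

#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/pass_manager.h>

namespace {

// Illustrative pass: count the nodes in the graph. A real backend pass
// would rewrite or lower nodes here instead of just counting them.
void countNodesPass(std::shared_ptr<torch::jit::Graph>& graph) {
  size_t n = 0;
  for (torch::jit::Node* node : graph->nodes()) {
    (void)node;
    ++n;
  }
}

// Static registration appends the pass to getCustomPasses(); with this
// patch applied, the pass runs after the internal optimization passes
// in runNondiffOptimization() rather than before them.
torch::jit::RegisterPass reg(countNodesPass);

} // namespace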