Move custom passes to last optimization step (#29256)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/29256
..

Test Plan: ..

Reviewed By: ZolotukhinM

Differential Revision: D18340212

fbshipit-source-id: 30f4850c8a21bdab42c7cf04b4b92b1787449ee2
This commit is contained in:
parent
6ea4219d20
commit
ee21142e40
@@ -713,10 +713,6 @@ bool needsGradient(const std::shared_ptr<const Graph>& graph) {
 }
 
 void runNondiffOptimization(std::shared_ptr<Graph>& graph) {
-  // run custom passes that different backends can register
-  for (const auto& pass : getCustomPasses()) {
-    pass(graph);
-  }
   // decomposition pass, decompose certain ops that will be used in the
   // following passes (like batchmm and jit fusion)
   DecomposeOps(graph);
@@ -732,6 +728,12 @@ void runNondiffOptimization(std::shared_ptr<Graph>& graph) {
   QuantFusion(graph);
 
   FuseGraph(graph);
+
+  // Run custom passes that different backends can register.
+  // This is done last to give internal optimization passes priority.
+  for (const auto& pass : getCustomPasses()) {
+    pass(graph);
+  }
 }
 
 void runOptimization(std::shared_ptr<Graph>& graph) {
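For context, the "custom passes" moved here are the hook that out-of-tree backends use to rewrite the JIT graph. Below is a minimal sketch of registering one, assuming the RegisterPass helper and header paths from torch/csrc/jit/pass_manager.h as they existed around this commit; the pass name and body are hypothetical.

    #include <memory>

    #include <torch/csrc/jit/ir.h>            // torch::jit::Graph
    #include <torch/csrc/jit/pass_manager.h>  // RegisterPass, getCustomPasses

    namespace {

    // Hypothetical backend pass: just dumps the graph it receives. After
    // this commit it runs last in runNondiffOptimization, i.e. after
    // DecomposeOps, BatchMM, and FuseGraph, so it sees the already-fused
    // graph rather than the raw one.
    void myBackendPass(std::shared_ptr<torch::jit::Graph>& graph) {
      graph->dump();
    }

    // Static registration: the RegisterPass constructor appends the pass
    // to the list returned by getCustomPasses(), which the loop in the
    // diff above iterates over.
    torch::jit::RegisterPass reg(myBackendPass);

    } // anonymous namespace

Since registration happens via a static object at library-load time, this change only affects when a registered pass runs within each optimization round, not when it gets registered.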