pytorch/torch/csrc/jit/codegen/cuda/parser.h
jiej e4e19d5beb nvfuser parser skip api (#74520)
Summary:
Added a Python API to disable nvFuser fusion for a given op kind.

```
          "_jit_set_nvfuser_skip_node_kind",
          [](const std::string& op_name, bool flip = true) {
            return fuser::cuda::skipNode(op_name, flip);
          })
```

Args:
    `op_name`: symbol of the op;
    `flip`: flag indicating whether to flip the given op in the skip list.
Returns:
    a bool flag indicating whether `op_name` was already in the skip list.

A Python example that disables fusion of `aten::add` from that point on:
`torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True)  # returns False, as no op is in skip list by default`
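
Based on the semantics documented above, a minimal sketch (not part of the original PR text) of how the toggle composes: flipping the same op kind twice removes it from the skip list again, and each call returns the prior state. Treating `flip=False` as a pure membership query is inferred from the `flip` description rather than stated explicitly here.

```python
import torch

# Flip aten::add into the skip list: nvfuser stops fusing it.
# Returns False because the skip list is empty by default.
assert torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True) is False

# With flip=False (inferred query semantics) the list is left unchanged;
# the call only reports whether aten::add is currently skipped.
assert torch._C._jit_set_nvfuser_skip_node_kind("aten::add", False) is True

# Flip again to restore fusion of aten::add.
# Returns True because aten::add was in the skip list.
assert torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True) is True
```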

Pull Request resolved: https://github.com/pytorch/pytorch/pull/74520

Reviewed By: saketh-are

Differential Revision: D35046110

Pulled By: davidberard98

fbshipit-source-id: 689f5286513dbab206768823a852467b9f6b49b6
(cherry picked from commit 9a31129f7591ba2d393ab057b1cd137a6a25e7e8)
2022-03-23 20:56:43 +00:00

#pragma once
#include <c10/macros/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/profiling_record.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
/*
 * This file handles parsing of PyTorch JIT IR.
 *
 * It is used in two places:
 * 1. When partitioning PyTorch JIT IR to create prim::CudaFusionGroup
 *    nodes, each node is queried via `isNodeParsible` to determine whether
 *    it can be handled by the fuser (i.e. whether a given PyTorch JIT
 *    operator should be merged into a fusion group);
 * 2. When lowering PyTorch JIT IR to the CUDA codegen IR, which creates a
 *    `Fusion` by traversing a PyTorch JIT graph.
 *
 * TODO: we could consider exposing an API to allow custom registration of
 * parsing rules for a given PyTorch JIT operator.
 */
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
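// Default thread-block dimensions for pointwise (Pw) and reduction kernels;
// FCD = fastest-changing dimension.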
constexpr int kPwThreadX = 128;
constexpr int kFcdReductionThreadX = 128;
constexpr int kNonFcdReductionThreadX = 32;
constexpr int kNonFcdReductionThreadY = 32;
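// Queries used while partitioning a graph: whether a block contains, or a
// given node is, a reduction / reduction-to-size / normalization /
// element-wise operation.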
TORCH_CUDA_CU_API bool hasReductionNode(const Block* block);
TORCH_CUDA_CU_API bool isReductionToSizeNode(const Node* node);
TORCH_CUDA_CU_API bool isReductionNode(const Node* node);
TORCH_CUDA_CU_API bool hasNormalizationNode(const Block* block);
TORCH_CUDA_CU_API bool isNormalizationNode(const Node* node);
TORCH_CUDA_CU_API bool isElementWiseNode(const Node* node);
// returns whether or not a parsing function exists for the given node type.
TORCH_CUDA_CU_API bool isNodeParsible(const Node* node);
TORCH_CUDA_CU_API bool shouldProfileNode(const Node* node);
TORCH_CUDA_CU_API bool skipNodeKind(const std::string& symbol_str, bool flip);
void InsertProfileNodes(ProfilingRecord* pr);
// lowers PyTorch jit graph to `Fusion`.
TORCH_CUDA_CU_API std::unique_ptr<Fusion> parseJitIR(
    const std::shared_ptr<Graph>& graph);
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch