pytorch/torch/csrc/jit/codegen/cuda/parser.h
#pragma once

#include <torch/csrc/WindowsTorchApiMacro.h>

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/profiling_record.h>

#include <torch/csrc/jit/codegen/cuda/fusion.h>

/*
 * This file handles parsing PyTorch jit ir.
 *
 * It is used in two places:
 * 1. When partitioning PyTorch jit ir to create prim::CudaFusionGroup, each
 *    node is queried via `isNodeParsible` to determine whether it can be
 *    handled by the fuser (i.e. whether a given PyTorch jit operator should
 *    be merged into the fusion group);
 * 2. When lowering PyTorch jit ir to CUDA codegen ir, where a `Fusion` is
 *    created by traversing a PyTorch jit graph.
 *
 * TODO: we could consider exposing an API to allow custom registration of
 * parsing rules for a given PyTorch jit operator.
 */

namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
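
// Default thread-block dimensions used for the generated kernels ("Pw"
// presumably stands for pointwise, "Fcd" for fastest-changing dimension;
// that reading of the abbreviations is an interpretation, not stated here).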
constexpr int kPwThreadX = 128;
constexpr int kFcdReductionThreadX = 128;
constexpr int kNonFcdReductionThreadX = 32;
constexpr int kNonFcdReductionThreadY = 32;
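
// Predicates used during fusion partitioning to classify jit ir nodes and
// blocks (reductions, normalizations, element-wise operations).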
TORCH_CUDA_CU_API bool hasReductionNode(const Block* block);
TORCH_CUDA_CU_API bool isReductionToSizeNode(const Node* node);
TORCH_CUDA_CU_API bool isReductionNode(const Node* node);
TORCH_CUDA_CU_API bool hasNormalizationNode(const Block* block);
TORCH_CUDA_CU_API bool isNormalizationNode(const Node* node);
TORCH_CUDA_CU_API bool isElementWiseNode(const Node* node);
// returns whether or not a parsing function exists for the given node type.
TORCH_CUDA_CU_API bool isNodeParsible(const Node* node);
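
// Inserts profiling nodes into the graphs recorded by `pr` (presumably so
// the fuser can observe runtime values it needs before fusion happens).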
void InsertProfileNodes(ProfilingRecord* pr);
// lowers PyTorch jit graph to `Fusion`.
TORCH_CUDA_CU_API std::unique_ptr<Fusion> parseJitIR(
const std::shared_ptr<Graph>& graph);
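
// Example usage (a minimal sketch for illustration only, not part of this
// header; assumes `node` is a candidate Node* seen during partitioning and
// `graph` is the std::shared_ptr<Graph> held by a prim::CudaFusionGroup):
//
//   if (torch::jit::fuser::cuda::isNodeParsible(node)) {
//     // the node may be merged into a prim::CudaFusionGroup
//   }
//
//   std::unique_ptr<Fusion> fusion =
//       torch::jit::fuser::cuda::parseJitIR(graph);
//   // `fusion` now holds the CUDA codegen ir lowered from the jit graph.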
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch