mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 00:21:07 +01:00
This PR is the first step towards refactoring the build for nvfuser, so that the codegen becomes a standalone library.
Contents inside this PR:
1. nvfuser code base has been moved to `./nvfuser`, from `./torch/csrc/jit/codegen/cuda/`, except for registration code for integration (interface.h/interface.cpp)
2. splits the build system so nvfuser is generating its own `.so` files. Currently there are:
- `libnvfuser_codegen.so`, which contains the integration, codegen and runtime system of nvfuser
- `nvfuser.so`, which is nvfuser's python API via pybind. Python frontend is now exposed via `nvfuser._C.XXX` instead of `torch._C._nvfuser`
3. nvfuser cpp tests are currently compiled into `nvfuser_tests`
4. cmake is refactored so that:
- nvfuser now has its own `CMakeLists.txt`, which is under `torch/csrc/jit/codegen/cuda/`.
- nvfuser backend code is not compiled inside `libtorch_cuda_xxx` any more
- nvfuser is added as a subdirectory under `./CMakeLists.txt` at the very end after torch is built.
- since nvfuser has dependency on torch, the registration of nvfuser at runtime is done via dlopen (`at::DynamicLibrary`). This avoids circular dependency in cmake, which will be a nightmare to handle. For details, look at `torch/csrc/jit/codegen/cuda/interface.cpp::LoadingNvfuserLibrary`
Future work that's scoped in following PR:
- Currently since nvfuser codegen has dependency on torch, we need to refactor that out so we can move nvfuser into a submodule and not rely on dlopen to load the library. @malfet
- Since we moved nvfuser into a cmake build, we effectively disabled bazel build for nvfuser. This could impact internal workload at Meta, so we need to put support back. cc'ing @vors
Pull Request resolved: https://github.com/pytorch/pytorch/pull/89621
Approved by: https://github.com/davidberard98
59 lines
1.9 KiB
C++
59 lines
1.9 KiB
C++
#pragma once
|
|
|
|
#include <c10/macros/Export.h>
|
|
#include <torch/csrc/jit/ir/ir.h>
|
|
#include <torch/csrc/jit/passes/pass_manager.h>
|
|
#include <torch/csrc/jit/runtime/profiling_record.h>
|
|
|
|
/*
|
|
* This file contains APIs for cuda fuser;
|
|
*
|
|
* We use an empty static struct to hold the function pointers, which are
|
|
* registered separately. This is to support cpu-only compilation.
|
|
* Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp
|
|
*/
|
|
|
|
namespace torch {
|
|
namespace jit {
|
|
namespace fuser {
|
|
namespace cuda {
|
|
|
|
TORCH_API std::atomic<bool>& getCudaFusionGuardMode();
|
|
|
|
TORCH_API bool getSingletonFusion();
|
|
TORCH_API bool setSingletonFusion(bool value);
|
|
TORCH_API bool getHorizontalFusion();
|
|
TORCH_API bool setHorizontalFusion(bool value);
|
|
|
|
// dummy struct to allow API registration
|
|
struct CudaFuserInterface {
|
|
void (*fn_compile_n)(Node*) = nullptr;
|
|
void (*fn_run_n_s)(const Node*, Stack&) = nullptr;
|
|
void (*fn_fuse_graph)(std::shared_ptr<Graph>&) = nullptr;
|
|
bool (*fn_can_fuse_n)(const Node*) = nullptr;
|
|
void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr;
|
|
bool (*fn_profile_n)(const Node*) = nullptr;
|
|
bool (*fn_skip_n)(const std::string&, bool flip) = nullptr;
|
|
};
|
|
|
|
// Get interface, this is used by registration and user facing API internally
|
|
TORCH_API CudaFuserInterface* getFuserInterface();
|
|
|
|
TORCH_API void compileFusionGroup(Node* fusion_node);
|
|
TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack);
|
|
TORCH_API void fuseGraph(std::shared_ptr<Graph>&);
|
|
TORCH_API bool canFuseNode(const Node* node);
|
|
TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr);
|
|
TORCH_API bool profileNode(const Node* node);
|
|
|
|
TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true);
|
|
|
|
TORCH_API bool isEnabled();
|
|
TORCH_API bool setEnabled(bool is_enabled);
|
|
TORCH_API bool canBeEnabled();
|
|
|
|
} // namespace cuda
|
|
} // namespace fuser
|
|
} // namespace jit
|
|
} // namespace torch
|