[executorch][kernel reg] Allow kernel manual registration (#110086)

Summary:
Exposes a codegen mode that generates a hook for users to register their kernels manually.

If we pass `--manual-registration` flag to `gen_executorch.py`, we will generate the following files:
1. RegisterKernels.h, which declares a `register_all_kernels()` API inside the `torch::executor` namespace.
2. RegisterKernelsEverything.cpp, which implements `register_all_kernels()` by defining an array of generated kernels (a sketch of its shape follows below).
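
For orientation, here is a minimal sketch of the shape of the generated registration source. It is illustrative only: the real file is emitted by `gen_executorch.py` and populates the kernel array from the operator selection, so the body below is a placeholder rather than the actual generated code.

```cpp
// Illustrative sketch only -- not the verbatim generated file.
#include "RegisterKernels.h"  // generated header declaring register_all_kernels()

namespace torch {
namespace executor {

Error register_all_kernels() {
  // The generated implementation builds an array with one entry per selected
  // kernel (operator name, kernel key, unboxing wrapper) and hands it to the
  // runtime's operator registry, returning the registry's Error code.
  // Placeholder body for illustration:
  return Error::Ok;
}

} // namespace executor
} // namespace torch
```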

This way users can depend on the library declared by the `executorch_generated_lib` macro (with `manual_registration=True`) and include `RegisterKernels.h`. They can then call `register_all_kernels()` explicitly, as in the sketch below, instead of relying on the C++ static initialization mechanism, which is not available on some embedded systems.
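
As a usage illustration, here is a hedged sketch of a caller on a target without static initializers. The include path and surrounding program setup are assumptions that depend on the consuming build; only `register_all_kernels()` itself comes from this change.

```cpp
#include "RegisterKernels.h"  // exposed by the generated lib; actual include path may differ

int main() {
  // Explicitly register the generated kernels instead of relying on
  // C++ static initialization, which this target does not run.
  const torch::executor::Error err = torch::executor::register_all_kernels();
  if (err != torch::executor::Error::Ok) {
    return 1;  // kernel registration failed; bail out before loading a program
  }
  // ... load and execute an ExecuTorch program as usual ...
  return 0;
}
```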

Test Plan:
Rely on the unit test:

```
buck2 test fbcode//executorch/runtime/kernel/test:test_kernel_manual_registration
```

Reviewed By: cccclai

Differential Revision: D49439673

Pull Request resolved: https://github.com/pytorch/pytorch/pull/110086
Approved by: https://github.com/cccclai
Author: Mengwei Liu, 2023-09-27 16:04:20 +00:00 (committed by PyTorch MergeBot)
Parent: 1265400ba6
Commit: 0721a394b6
2 changed files with 43 additions and 2 deletions

RegisterKernels.h (new template file)

@@ -0,0 +1,22 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

// ${generated_comment}

// Exposing an API for registering all kernels at once.

#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/kernel/operator_registry.h>
#include <executorch/runtime/platform/profiler.h>

namespace torch {
namespace executor {
Error register_all_kernels();
} // namespace executor
} // namespace torch

torchgen/gen_executorch.py

@@ -254,6 +254,7 @@ def gen_unboxing(
    selector: SelectiveBuilder,
    use_aten_lib: bool,
    kernel_index: ETKernelIndex,
    manual_registration: bool,
) -> None:
    # Iterable type for write_sharded is a Tuple of (native_function, (kernel_key, metadata))
    def key_func(
@@ -268,9 +269,13 @@ def gen_unboxing(
    ]
    header = ["Functions.h" if use_aten_lib else "NativeFunctions.h"]
    filename = (
        "RegisterKernels.cpp"
        if manual_registration
        else "RegisterCodegenUnboxedKernels.cpp"
    )
    cpu_fm.write_sharded(
        "RegisterCodegenUnboxedKernels.cpp",
        filename,
        items,
        key_fn=key_func,
        env_callable=lambda unbox_kernel_entry: {
@@ -449,6 +454,12 @@ def gen_headers(
            ),
        },
    )
    cpu_fm.write(
        "RegisterKernels.h",
        lambda: {
            "generated_comment": "@" + "generated by torchgen/gen_executorch.py",
        },
    )
    headers = {
        "headers": [
            "#include <executorch/runtime/core/exec_aten/exec_aten.h> // at::Tensor etc.",
@@ -864,6 +875,13 @@ def main() -> None:
        help="a boolean flag to indicate whether we use ATen kernels or not, in the future this flag will be per "
        "operator",
    )
    parser.add_argument(
        "--manual_registration",
        "--manual-registration",
        action="store_true",
        help="a boolean flag to indicate whether we want to manually call "
        "register_kernels() or rely on static init.",
    )
    parser.add_argument(
        "--generate",
        type=str,
@@ -917,6 +935,7 @@ def main() -> None:
        selector=selector,
        use_aten_lib=options.use_aten_lib,
        kernel_index=kernel_index,
        manual_registration=options.manual_registration,
    )
    if custom_ops_native_functions:
        gen_custom_ops(