Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/45722

This diff does a bunch of things:

1. Introduces some abstractions as detailed in https://fb.quip.com/2oEzAR5MKqbD to help with selective-build-related codegen in multiple files.
2. Adds helper methods to combine operators, debug info, operator lists, etc.
3. Currently, the selective build machinery queries `op_registration_whitelist` directly at various places in the code. `op_registration_whitelist` is a list of allowed operator names (without overload name). We want to move to a world where the overload names are also included, so that we can be more selective about which operators we include. To that effect, it makes sense to hide the checking logic in a separate abstraction and have the build use that abstraction instead of putting all this selective-build-specific logic in the code generator itself. This change attempts to do just that.
4. Updates generate_code, unboxing-wrapper codegen, and autograd codegen to accept the operator-selector paradigm instead of a selected-operator list.
5. Updates `tools/code_analyzer/gen_op_registration_allowlist.py` to expose an actual structured operator dependency graph in addition to a serialized string.

There are a bunch of structural changes as well:

1. `root_op_list.yaml` and `combined_op_list.yaml` are now actual YAML files (not a space-separated list of operator names).
2. `generate_code.py` accepts only paths to operator list YAML files (both old style and new style), not lists of operator names as command-line arguments.
3. `gen.py` optionally also accepts a custom-build-related operators YAML path (this file has information about which operators to register in the generated library).

ghstack-source-id: 114578753

(Note: this ignores all push blocking failures!)

Test Plan: `buck test caffe2/test:selective_build`

Generated YAML files after the change: {P143981979} {P143982025} {P143982056}

Ensure that the generated files are the same before and after the change:

```
[dhruvbird@devvm2490 /tmp/TypeDefault.cpp] find -name "*.cpp" | xargs md5sum
d72c3d125baa7b77e4c5581bbc7110d2  ./after_change/gen_aten/TypeDefault.cpp
42353036c83ebc7620a7159235b9647f  ./after_change/lite_predictor_lib_aten/TypeDefault.cpp
d72c3d125baa7b77e4c5581bbc7110d2  ./before_change/gen_aten/TypeDefault.cpp
42353036c83ebc7620a7159235b9647f  ./before_change/lite_predictor_lib_aten/TypeDefault.cpp
```

The `VariableType_N.cpp` files are generated the same both before and after the change:

```
[dhruvbird@devvm2490 /tmp/VariableType] find -name "*.cpp" | xargs -n 1 md5sum | sort
3be89f63fd098291f01935077a60b677  ./after/VariableType_2.cpp
3be89f63fd098291f01935077a60b677  ./before/VariableType_2.cpp
40a3e59d64e9dbe86024cf314f127fd6  ./after/VariableType_4.cpp
40a3e59d64e9dbe86024cf314f127fd6  ./before/VariableType_4.cpp
a4911699ceda3c3a430f08c64e8243fd  ./after/VariableType_1.cpp
a4911699ceda3c3a430f08c64e8243fd  ./before/VariableType_1.cpp
ca9aa611fcb2a573a8cba4e269468c99  ./after/VariableType_0.cpp
ca9aa611fcb2a573a8cba4e269468c99  ./before/VariableType_0.cpp
e18f639ed23d802dc4a31cdba40df570  ./after/VariableType_3.cpp
e18f639ed23d802dc4a31cdba40df570  ./before/VariableType_3.cpp
```

Reviewed By: ljk53

Differential Revision: D23837010

fbshipit-source-id: ad06b1756af5be25baa39fd801dfdf09bc565442
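For illustration, a hedged sketch of how a new-style operator list YAML could be consumed. The exact on-disk schema is an assumption inferred from the keys accepted by `SelectiveBuildOperator.from_yaml_dict` in the file below, and the operator name and values are made up:

```python
import yaml  # PyYAML

# Hypothetical new-style operator entry; the keys mirror those accepted
# by SelectiveBuildOperator.from_yaml_dict() (defined in the file below).
op_list = yaml.safe_load("""
aten::add.Tensor:
  is_root_operator: true
  is_used_for_training: false
  include_all_overloads: false
  debug_info:
    - model_a
""")

ops = {
    name: SelectiveBuildOperator.from_yaml_dict(name, info)
    for name, info in op_list.items()
}
```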
160 lines · 6.3 KiB · Python
from typing import Dict, Optional, Tuple

from dataclasses import dataclass


# This class holds information about a single operator used to determine
# the outcome of a selective/custom PyTorch build that doesn't include
# registration code for all the supported operators. This is done to
# reduce the size of the generated binary so that it can be deployed in
# situations where binary size comes at a premium.
#
@dataclass(frozen=True)
class SelectiveBuildOperator:
    # The name of the operator. This includes the namespace prefix
    # (aten::, etc.). The operator name may or may not have the overload
    # name. If this operator name does not specify an overload name, the
    # way to determine whether this entry refers to the family of operators
    # with this base name or just to the operator with this name is to look
    # at the value of the 'include_all_overloads' flag in this class.
    name: str

    # True if this is a root operator (i.e. called directly from a
    # TorchScript model, etc.). An operator is considered to be a root
    # operator if it is called directly from any one of the models that
    # this instance of the PyTorch library was built for. Hence, it may
    # not be a root operator in all of the models that are used in this
    # instance of the PyTorch library.
    is_root_operator: bool

    # Is this operator used for on-device training? If True, then we need
    # to use this information to generate code in VariableType_N.cpp for
    # registration of training related operators. Again, this is True if
    # this operator is used for training in one or more models used by
    # this instance of the PyTorch library.
    is_used_for_training: bool

    # If True, this operator instance (object) refers to an operator
    # without the overload name and should apply to all overloads that
    # have this operator name as the base name. This flag is applicable
    # only to objects whose operator names do not contain a DOT (period)
    # character.
    #
    # Note: This flag is a temporary workaround to grandfather in the
    # current static selective (custom) build mechanism, which largely
    # ignores overload names when determining whether to select operators
    # for registration purposes.
    include_all_overloads: bool
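    # For example (illustrative names): an entry named 'aten::add' with
    # include_all_overloads=True selects every overload whose base name is
    # 'aten::add' (such as 'aten::add.Tensor' or 'aten::add.Scalar'),
    # whereas an entry named 'aten::add.Tensor' refers to exactly one
    # overload.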

    # Debug Information at the operator level
    _debug_info: Optional[Tuple[str, ...]]

    @staticmethod
    def from_yaml_dict(op_name: str, op_info: Dict[str, object]) -> 'SelectiveBuildOperator':
        allowed_keys = {
            'name',
            'is_root_operator',
            'is_used_for_training',
            'include_all_overloads',
            'debug_info',
        }

        if len(set(op_info.keys()) - allowed_keys) > 0:
            raise Exception("Got unexpected top level keys: {}".format(
                ",".join(set(op_info.keys()) - allowed_keys),
            ))

        if 'name' in op_info:
            assert op_name == op_info['name']

        is_root_operator = op_info.get('is_root_operator', True)
        assert isinstance(is_root_operator, bool)

        is_used_for_training = op_info.get('is_used_for_training', True)
        assert isinstance(is_used_for_training, bool)

        include_all_overloads = op_info.get('include_all_overloads', True)
        assert isinstance(include_all_overloads, bool)

        debug_info: Optional[Tuple[str, ...]] = None
        if 'debug_info' in op_info:
            di_list = op_info['debug_info']
            assert isinstance(di_list, list)
            debug_info = tuple(str(x) for x in di_list)

        return SelectiveBuildOperator(
            name=op_name,
            is_root_operator=is_root_operator,
            is_used_for_training=is_used_for_training,
            include_all_overloads=include_all_overloads,
            _debug_info=debug_info,
        )

    @staticmethod
    def from_legacy_operator_name_without_overload(name: str) -> 'SelectiveBuildOperator':
        return SelectiveBuildOperator(
            name=name,
            is_root_operator=True,
            is_used_for_training=True,
            include_all_overloads=True,
            _debug_info=None,
        )

    def to_dict(self) -> Dict[str, object]:
        ret: Dict[str, object] = {
            'is_root_operator': self.is_root_operator,
            'is_used_for_training': self.is_used_for_training,
            'include_all_overloads': self.include_all_overloads,
        }
        if self._debug_info is not None:
            ret['debug_info'] = self._debug_info

        return ret
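
    # Illustrative round trip (values are made up):
    #
    #   op = SelectiveBuildOperator.from_yaml_dict(
    #       'aten::add.Tensor',
    #       {'is_root_operator': True, 'is_used_for_training': False},
    #   )
    #   op.to_dict()
    #   # -> {'is_root_operator': True,
    #   #     'is_used_for_training': False,
    #   #     'include_all_overloads': True}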


def merge_debug_info(
    lhs: Optional[Tuple[str, ...]],
    rhs: Optional[Tuple[str, ...]],
) -> Optional[Tuple[str, ...]]:
    # Ensure that when merging, each entry shows up just once.
    if lhs is None and rhs is None:
        return None

    return tuple(set((lhs or ()) + (rhs or ())))
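
# Illustrative: merge_debug_info(('model_a',), ('model_a', 'model_b'))
# yields a tuple containing 'model_a' and 'model_b' exactly once each;
# ordering is unspecified because a set is used for de-duplication.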


def combine_operators(
        lhs: 'SelectiveBuildOperator',
        rhs: 'SelectiveBuildOperator') -> 'SelectiveBuildOperator':
    if str(lhs.name) != str(rhs.name):
        raise Exception(
            "Expected both arguments to have the same name, but got '{}' and '{}' instead".format(
                str(lhs.name),
                str(rhs.name),
            )
        )

    return SelectiveBuildOperator(
        name=lhs.name,
        # Consider this operator to be a root operator if it is a root
        # operator in any of the models used in this instance of the
        # PyTorch library.
        is_root_operator=lhs.is_root_operator or rhs.is_root_operator,
        # Consider this operator to be a training operator if it is an
        # operator used for training in any of the models used in this
        # instance of the PyTorch library.
        is_used_for_training=lhs.is_used_for_training or rhs.is_used_for_training,
        include_all_overloads=lhs.include_all_overloads or rhs.include_all_overloads,
        _debug_info=merge_debug_info(lhs._debug_info, rhs._debug_info),
    )
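
# Illustrative: merging the same operator coming from two models ORs the
# boolean flags, so an entry that is a root operator in one model and a
# training operator in another ends up marked as both.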


def merge_operator_dicts(
    lhs: Dict[str, SelectiveBuildOperator],
    rhs: Dict[str, SelectiveBuildOperator],
) -> Dict[str, SelectiveBuildOperator]:
    operators: Dict[str, SelectiveBuildOperator] = {}
    for (op_name, op) in list(lhs.items()) + list(rhs.items()):
        new_op = op
        if op_name in operators:
            new_op = combine_operators(operators[op_name], op)

        operators[op_name] = new_op

    return operators


def strip_operator_overload_name(op_name: str) -> str:
    return op_name.split(".")[0]
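

# ---------------------------------------------------------------------------
# Minimal usage sketch: the operator names and flag values below are
# illustrative, and only the helpers defined above are exercised.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    add_in_model_a = SelectiveBuildOperator.from_yaml_dict(
        'aten::add.Tensor',
        {'is_root_operator': True, 'is_used_for_training': False},
    )
    add_in_model_b = SelectiveBuildOperator.from_yaml_dict(
        'aten::add.Tensor',
        {'is_root_operator': False, 'is_used_for_training': True},
    )

    # The same operator seen in two models has its flags OR-ed together.
    merged = merge_operator_dicts(
        {'aten::add.Tensor': add_in_model_a},
        {'aten::add.Tensor': add_in_model_b},
    )
    assert merged['aten::add.Tensor'].is_root_operator
    assert merged['aten::add.Tensor'].is_used_for_training

    # Dropping the overload name yields the base operator name.
    assert strip_operator_overload_name('aten::add.Tensor') == 'aten::add'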