mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/55334 The goal of this PR is to clean up some of the autograd codegen to compare C++ types using `CType` objects instead of raw strings. My last PR in the stack made that string comparison a little more fragile, since the raw C++ strings needed to be namespace-aware. I confirmed byte-for-byte no codegen changes vs. the last PR (which added namespaces to the codegen) by running `diff -qr ../pytorch-common_test/torch/csrc/autograd/generated/ ../pytorch-callgrind_test_after2/torch/csrc/autograd/generated/` and `diff -qr ../pytorch-common_test/build/aten/src/ATen/ ../pytorch-callgrind_test_after2/build/aten/src/ATen/` Note that a better end-state for the autograd codegen would be to do all of its type pattern matching directly off of JIT types, instead of off of CType’s (which are really just generated from JIT types, incorporating C++ specific semantics). That looks like it’ll require a pretty substantial change though, so I’m not doing it in this PR. As part of this change (and after talking with ezyang), I split off the `CType` data class into a separate `NamedCType` class, which holds a name and a `CType`. This way, `CType` only knows about actual C++ types, making it easier to compare CType’s to each other in the codegen when we only care about the type. The core change is in `types.py`, but it required a bunch of downstream changes to update all of the places where we create `CType`s to create `NamedCType`s instead. The main change in the autograd codegen was that I updated `SavedAttribute` to store a `NamedCType`. The other autograd changes all pretty much came from that change. Test Plan: Imported from OSS Reviewed By: bhosmer Differential Revision: D27708347 Pulled By: bdhirsh fbshipit-source-id: 3e07c80569c7b229c638f389e76e319bff6315f9
67 lines
2.5 KiB
Python
from tools.codegen.model import (Argument, FunctionSchema, Return,
|
|
SelfArgument, TensorOptionsArguments, Type,
|
|
assert_never)
|
|
|
|
from tools.codegen.api.types import ArgName, Binding, NamedCType, CType
|
|
from tools.codegen.api import cpp
|
|
|
|
import itertools
|
|
from typing import Sequence, List, Union
|
|
|
|
# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched
# the C++ API, but with the establishment of the boxed API, we've
# made changes to the dispatcher API so that the unboxed API
# better aligns with the boxed API. The dispatcher API hooks heavily
# into our template based boxing/unboxing machinery, so changes
# to this convention will usually need template updates too.
#
# Prominent characteristics of the dispatcher API:
#
# - dtype, layout, device and pin_memory are represented as separate
#   arguments.
#
|
|
|
|
def name(func: FunctionSchema) -> str:
    """Return the dispatcher API name for *func*.

    The dispatcher name is currently identical to the C++ API name.
    """
    dispatcher_name = cpp.name(func)
    return dispatcher_name
|
|
|
|
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    """Compute the dispatcher C++ type (with binding name) for a JIT type.

    NOTE: delegating to cpp here is a false friend — the dispatcher
    convention merely coincides with the C++ one today.  If more special
    cases accumulate here in the future, consider inverting things so
    that cpp.argument_type calls this, or inlining this function entirely.
    """
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
|
|
|
|
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
    """Dispatcher C++ type for a single schema argument.

    Mutability of the C++ type follows the argument's write flag.
    """
    jit_type = a.type
    return argumenttype_type(jit_type, mutable=a.is_write, binds=binds)
|
|
|
|
def returns_type(rs: Sequence[Return]) -> CType:
    """Dispatcher C++ return type for a schema's returns.

    At present this is the same as the C++ API return type — but the
    two conventions are free to diverge.
    """
    return cpp.returns_type(rs)
|
|
|
|
def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument]
) -> List[Binding]:
    """Translate one schema argument into its dispatcher binding(s).

    A plain Argument yields exactly one Binding; a SelfArgument is
    unwrapped to its underlying Argument; TensorOptionsArguments is
    scattered into its four component arguments (dtype, layout,
    device, pin_memory), yielding one binding per component.
    """
    if isinstance(a, SelfArgument):
        # Unwrap and treat like the underlying ordinary Argument.
        return argument(a.argument)
    if isinstance(a, TensorOptionsArguments):
        # The dispatcher API takes the four TensorOptions fields as
        # separate arguments.
        scattered: List[Binding] = []
        for component in (a.dtype, a.layout, a.device, a.pin_memory):
            scattered.extend(argument(component))
        return scattered
    if isinstance(a, Argument):
        binding = Binding(
            nctype=argument_type(a, binds=a.name),
            name=a.name,
            argument=a,
        )
        return [binding]
    assert_never(a)
|
|
|
|
def arguments(func: FunctionSchema) -> List[Binding]:
    """All dispatcher bindings for *func*.

    Arguments are emitted in schema order: positional, then
    keyword-only, then out arguments.
    """
    schema_args = func.arguments
    bindings: List[Binding] = []
    for a in itertools.chain(
        schema_args.positional,
        schema_args.kwarg_only,
        schema_args.out,
    ):
        bindings.extend(argument(a))
    return bindings
|