Fix typo under torchgen directory (#111154)

This PR fixes typos in comments and messages in files under the `torchgen` directory.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/111154
Approved by: https://github.com/rajveer43, https://github.com/Skylion007
Kazuaki Ishizaki 2023-10-13 16:43:42 +00:00 committed by PyTorch MergeBot
parent b460c30893
commit ac48c11ab7
9 changed files with 12 additions and 12 deletions

@@ -124,7 +124,7 @@ def valuetype_type(
 raise AssertionError(f"unrecognized type {repr(t)}")
-# Translation of types occuring in JIT arguments to a C++ argument type.
+# Translation of types occurring in JIT arguments to a C++ argument type.
 # If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
 # For example, we'll return std::vector<int> instead of IntArrayRef.
 # See Note [translation from C++ reference to value types]

@@ -38,7 +38,7 @@ from torchgen.utils import assert_never
 # API have been fixed.
-# Translation of types occuring in JIT arguments to a C++ argument type.
+# Translation of types occurring in JIT arguments to a C++ argument type.
 # NB: For now, mutable doesn't do anything; but it could if we make
 # some more nominal types
 def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:

@@ -216,7 +216,7 @@ class GenLazyIR(ABC):
 scalar_args = schema.filtered_args(values=False, scalars=True)
-# Shape constuction.
+# Shape construction.
 # Conditionally build shape depending on specified shape property
 if schema.properties.ShapePrecompute:
 shape_ctor_arg = "std::move(shapes),"

@@ -93,7 +93,7 @@ def valuetype_type(
 raise AssertionError(f"unrecognized type {repr(t)}")
-# Translation of types occuring in JIT arguments to a C++ argument type.
+# Translation of types occurring in JIT arguments to a C++ argument type.
 # If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
 # For example, we'll return std::vector<int> instead of IntArrayRef.
 # See Note [translation from C++ reference to value types]

@@ -879,7 +879,7 @@ def main() -> None:
 "--manual_registration",
 "--manual-registration",
 action="store_true",
-help="a boolean flag to indicate whether we want to maually call"
+help="a boolean flag to indicate whether we want to manually call"
 "register_kernels() or rely on static init. ",
 )
 parser.add_argument(

@@ -629,7 +629,7 @@ def emit_inplace_functionalization_body(
 if ({str(not any_storage_args and f.func.kind() == SchemaKind.inplace).lower()}) {{
 // Before converting the mutable op to its functional variant, run meta tensors through the original op.
 // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
-// (We can only do this for inplace ops today though, because they technicaly all support meta tensors).
+// (We can only do this for inplace ops today though, because they technically all support meta tensors).
 {meta_conversion_str}
 at::AutoDispatchSkipFunctionalize func_guard;
 c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
@@ -731,7 +731,7 @@ def gen_functionalization_registration(
 # See Note [resize_ in Functionalization]
 return []
 assert not f.is_view_op
-# functionalization needs to generate and register kernals for inplace ops.
+# functionalization needs to generate and register kernels for inplace ops.
 # We *also* need to directly register CompositeImplicitAUtograd kernels
 # so that they decompose properly before functioanlization.
 if modifies_arguments(f):

@@ -426,7 +426,7 @@ def run_gen_lazy_tensor(
 Generated lazy native functions all perform shape inference, by first using a meta:: kernel
 if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
-knows the call signature for compute_shape_{op} becuase it matches the nativefunction (and meta::) signature,
+knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
 so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
 to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
 the expected signature which can be copy-pasted into shape_inference.h.

@@ -1413,7 +1413,7 @@ class FunctionSchema:
 ), "out= ops that accept tensor lists as out arguments "
 "are expected to have no return type (since you can't do method chaining on them)"
 else:
-# mutable keyward arguments whose name has _scratch_ prefix are
+# mutable keyword arguments whose name has _scratch_ prefix are
 # scratch tensors for memory planning and should not be returned
 assert len(
 [
@@ -2208,7 +2208,7 @@ class Arguments:
 post_self_positional=tuple(
 map(strip_arg_annotation, self.post_self_positional)
 ),
-# Since TensorOptions are droped, the post_tensor_options_kwargs are
+# Since TensorOptions are dropped, the post_tensor_options_kwargs are
 # converted to pre_tensor_options_kwargs
 pre_tensor_options_kwarg_only=tuple(
 map(strip_arg_annotation, self.pre_tensor_options_kwarg_only)

@@ -371,8 +371,8 @@ def add_generated_native_functions(
 rs: List[NativeFunction],
 indices: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]],
 ) -> None:
-# The main code for gnerating new NativeFunctions
-# First we group of NaitveFunctions by schema kind,
+# The main code for generating new NativeFunctions
+# First we group of NativeFunctions by schema kind,
 # then we detect which ones are missing and generate them.
 pre_grouped_native_functions = pre_group_native_functions(rs)
 for d in pre_grouped_native_functions.values():