[skip ci] Fix "arugment" typos (#61459)

Summary:
Fixes https://github.com/pytorch/pytorch/issues/61455.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/61459

Reviewed By: soulitzer

Differential Revision: D29636559

Pulled By: samestep

fbshipit-source-id: 9ad65265c0491d9e81bb303abe3a07c6843bfa4a
commit 3a0801f960 (parent e5fcc903d6)
Author: Sam Estep
Date:   2021-07-15 15:19:09 -07:00
Committed by: Facebook GitHub Bot

7 changed files with 7 additions and 7 deletions


@@ -7,7 +7,7 @@ void common_device_check_failure(optional<Device>& common_device, const at::Tens
   TORCH_CHECK(false,
     "Expected all tensors to be on the same device, but "
     "found at least two devices, ", common_device.value(), " and ", tensor.device(), "! "
-    "(when checking arugment for argument ", argName, " in method ", methodName, ")");
+    "(when checking argument for argument ", argName, " in method ", methodName, ")");
 }
 } // namespace impl
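
For reference, the corrected message is the one users hit when an op receives tensors on different devices. A minimal sketch of how it surfaces (illustrative only, not code from this PR; it assumes a CUDA device is available, otherwise the check never trips):

```python
import torch

# Mixing devices in a single op triggers the TORCH_CHECK above.
# This assumes a CUDA device is present; on a CPU-only machine the
# second tensor cannot be created and the check is never reached.
a = torch.ones(2, device="cpu")
b = torch.ones(2, device="cuda")
try:
    torch.add(a, b)
except RuntimeError as e:
    print(e)  # Expected all tensors to be on the same device, but found at least two devices, ...
```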


@@ -50,7 +50,7 @@ C10_DEFINE_string(
 C10_DEFINE_bool(
     no_inputs,
     false,
-    "Whether the model has any input. Will ignore other input arugments if true");
+    "Whether the model has any input. Will ignore other input arguments if true");
 C10_DEFINE_bool(
     use_caching_allocator,
     false,


@@ -48,7 +48,7 @@ C10_DEFINE_string(
 C10_DEFINE_bool(
     no_inputs,
     false,
-    "Whether the model has any input. Will ignore other input arugments if true");
+    "Whether the model has any input. Will ignore other input arguments if true");
 C10_DEFINE_bool(
     use_caching_allocator,
     false,


@@ -247,7 +247,7 @@ multiplication after the ``F.relu``, and then clean up the original
 objects to automatically record operations into the :class:`Graph`.
 To use this method, we write the operations that we want inserted as regular
-PyTorch code and invoke that code with :class:`Proxy` objects as arugments.
+PyTorch code and invoke that code with :class:`Proxy` objects as arguments.
 These :class:`Proxy` objects will capture the operations that are performed
 on them and append them to the :class:`Graph`.
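
For context, the passage above is FX's documentation on Proxy-based graph recording. A minimal sketch of the idea, invoking regular PyTorch code with :class:`Proxy` objects as arguments (illustrative only, not code from this PR):

```python
import torch
import torch.fx as fx

# Build a Graph by calling ordinary PyTorch code on Proxy arguments;
# the operations are recorded as graph nodes instead of executing eagerly.
graph = fx.Graph()
x = fx.Proxy(graph.placeholder("x"))
y = torch.relu(x) * 2        # recorded into `graph` via the Proxy
graph.output(y.node)

gm = fx.GraphModule(torch.nn.Module(), graph)
print(gm(torch.tensor([-1.0, 3.0])))  # tensor([0., 6.])
```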


@@ -407,7 +407,7 @@ class TestTorchFunctionOverride(TestCase):
     def test_precedence_semantics(self):
         """Test semantics for __torch_function__ for functions that take
-        multiple arugments
+        multiple arguments
 
         For functions that take multiple arguments, the appropriate
         __torch_function__ implementation to call is determined by
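
The docstring is cut off by the hunk, but the behavior under test is multi-argument dispatch. A small sketch of the semantics (the `Intercepting` class is a made-up illustration, not from the test file):

```python
import torch

# Any argument whose type defines __torch_function__ can take over the
# call, regardless of its position among multiple arguments.
# `Intercepting` is a hypothetical example class.
class Intercepting:
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        return f"intercepted {func.__name__}"

t = torch.ones(2)
m = Intercepting()
print(torch.add(t, m))  # intercepted add
print(torch.add(m, t))  # intercepted add
```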


@@ -5,7 +5,7 @@ namespace torch {
 namespace jit {
 namespace tensorexpr {
 // A helper structure to store the arguments to specify dimensions. In the
-// Compute arugments for dim_args, all of the following is supported. For
+// Compute arguments for dim_args, all of the following is supported. For
 // example:
 // dim_args: {1, 2, 3, 4}
 // dim_args: {{1, "x"}, {2, "y"}, {3, "z"}}


@@ -317,7 +317,7 @@ def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple
         target (inspect.Signature): Signature object for the target
         args (Tuple): Arguments that appear at the callsite for `target`
-        kwargs (Dict): Keyword arugments that appear at the callsite for `target`
+        kwargs (Dict): Keyword arguments that appear at the callsite for `target`
         normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
 
     Returns:
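
The docstring describes normalizing call-site args/kwargs against an `inspect.Signature`. The core mechanism is signature binding, sketched below with a made-up `target` (an illustration of the idea, not the torch.fx implementation):

```python
import inspect

# Bind positional and keyword call-site arguments against a signature,
# then read them back in a normalized, name-keyed form.
# `target` is a made-up example function, not a torch.fx internal.
def target(a, b, c=3):
    return a + b + c

sig = inspect.signature(target)
bound = sig.bind(1, 2, c=4)     # as if called: target(1, 2, c=4)
bound.apply_defaults()
print(dict(bound.arguments))    # {'a': 1, 'b': 2, 'c': 4}
```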