Change docstring type callable to Callable for consistency (#82487)

### Description

Across PyTorch's docstrings, both `callable` and `Callable` are used for variable types. `Callable` should be capitalized, as we are referring to the `Callable` type and not the Python `callable()` function.
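
For reference, the two names mean different things in Python, so mixing them in docstrings is ambiguous. A minimal sketch of the distinction (not part of this diff):

```python
from typing import Callable

# `Callable` is the typing construct used in annotations (and in these docstrings)
# to describe "something that can be called with these argument/return types".
def apply_twice(fn: Callable[[int], int], x: int) -> int:
    return fn(fn(x))

# `callable()` is the built-in function that merely checks whether an object
# supports being called at all.
assert callable(apply_twice)
assert not callable(42)

print(apply_twice(lambda v: v + 1, 0))  # 2
```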

### Testing

There shouldn't be any testing required.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/82487
Approved by: https://github.com/albanD
Authored by ProGamerGov on 2022-08-01 17:26:09 +00:00; committed by PyTorch MergeBot
parent c2ac3e6831
commit 71d50f4f89
29 changed files with 36 additions and 36 deletions


@@ -57,7 +57,7 @@ def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):
state (GraphExecutor or GraphExecutorState): GraphExecutor to display.
name_prefix (str): Name prefix of the containing subgraph.
pb_graph (GraphDef): graph to append to.
- inline_graph (callable): a function that handles setting up a value_map,
+ inline_graph (Callable): a function that handles setting up a value_map,
so that some graphs in here can be inlined. This is necessary, because
this will simply be `visualize` for the top-level GraphExecutor,
or `inline_graph` for all nested ones.


@@ -61,7 +61,7 @@ class ShardedOptimizer(optim.Optimizer):
r"""Performs a single optimization step (parameter update).
Args:
- closure (callable): A closure that reevaluates the model and
+ closure (Callable): A closure that reevaluates the model and
returns the loss. Optional for most optimizers.
.. note::


@@ -95,11 +95,11 @@ def auto_quantize(func, qtype, quant_loss=None):
. all_gather, all_to_all collective ops
Note: BFP16 only supports 2D tensors.
Args:
- func (callable): A function representing collective operations.
+ func (Callable): A function representing collective operations.
qtype (QuantType): Quantization method
quant_loss (float, optional): This can be used to improve accuracy in the dequantization.
Returns:
- (callable): the same collective as func but enables automatic quantization/dequantization.
+ (Callable): the same collective as func but enables automatic quantization/dequantization.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):


@@ -1073,7 +1073,7 @@ class P2POp(object):
``batch_isend_irecv`` for point-to-point communications.
Args:
- op (callable): A function to send data to or receive data from a peer process.
+ op (Callable): A function to send data to or receive data from a peer process.
The type of ``op`` is either ``torch.distributed.isend`` or
``torch.distributed.irecv``.
tensor (Tensor): Tensor to send or receive.


@@ -4128,7 +4128,7 @@ class FullyShardedDataParallel(nn.Module):
peers to communicate with next in `GossipGrad <https://arxiv.org/abs/1803.05880>`_, etc.
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
- hook (callable): Callable with the following signature:
+ hook (Callable): Callable with the following signature:
``hook: Callable[torch.Tensor] -> None``:
This function takes in a Python tensor, which represents
the full, flattened, unsharded gradient with respect to all variables


@@ -984,7 +984,7 @@ class ZeroRedundancyOptimizer(Optimizer, Joinable):
If the argument itself is ``None``, then all parameters are
updated, and the gradients are assumed to be already populated.
(default: ``None``)
- closure (callable): a closure that re-evaluates the model and
+ closure (Callable): a closure that re-evaluates the model and
returns the loss; optional for most optimizers and should be
``None`` if ``gradients`` is not ``None``; (default: ``None``)
Returns:
@@ -1043,7 +1043,7 @@ class ZeroRedundancyOptimizer(Optimizer, Joinable):
Performs a single optimizer step and syncs parameters across all ranks.
Arguments:
- closure (callable): a closure that re-evaluates the model and
+ closure (Callable): a closure that re-evaluates the model and
returns the loss; optional for most optimizers.
Returns:
Optional loss depending on the underlying local optimizer.


@@ -528,7 +528,7 @@ def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
Args:
to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
- func (callable): a callable function, such as Python callables, builtin
+ func (Callable): a callable function, such as Python callables, builtin
operators (e.g. :meth:`~torch.add`) and annotated
TorchScript functions.
args (tuple): the argument tuple for the ``func`` invocation.
@@ -736,7 +736,7 @@ def rpc_sync(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
Args:
to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
- func (callable): a callable function, such as Python callables, builtin
+ func (Callable): a callable function, such as Python callables, builtin
operators (e.g. :meth:`~torch.add`) and annotated
TorchScript functions.
args (tuple): the argument tuple for the ``func`` invocation.
@@ -810,7 +810,7 @@ def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
Args:
to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
- func (callable): a callable function, such as Python callables, builtin
+ func (Callable): a callable function, such as Python callables, builtin
operators (e.g. :meth:`~torch.add`) and annotated
TorchScript functions.
args (tuple): the argument tuple for the ``func`` invocation.


@@ -98,7 +98,7 @@ class ConstraintRegistry(object):
constraint (subclass of :class:`~torch.distributions.constraints.Constraint`):
A subclass of :class:`~torch.distributions.constraints.Constraint`, or
a singleton object of the desired class.
- factory (callable): A callable that inputs a constraint object and returns
+ factory (Callable): A callable that inputs a constraint object and returns
a :class:`~torch.distributions.transforms.Transform` object.
"""
# Support use as decorator.


@@ -159,7 +159,7 @@ class _DependentProperty(property, _Dependent):
return constraints.interval(self.low, self.high)
Args:
- fn (callable): The function to be decorated.
+ fn (Callable): The function to be decorated.
is_discrete (bool): Optional value of ``.is_discrete`` in case this
can be computed statically. If not provided, access to the
``.is_discrete`` attribute will raise a NotImplementedError.


@@ -1053,7 +1053,7 @@ def script(obj, optimize=None, _frames_up=0, _rcb=None,
and as a decorator ``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Args:
- obj (callable, class, or nn.Module): The ``nn.Module``, function, class type,
+ obj (Callable, class, or nn.Module): The ``nn.Module``, function, class type,
dictionary, or list to compile.
example_inputs (Union[List[Tuple], Dict[Callable, List[Tuple]], None]): Provide example inputs
to annotate the arguments for a function or ``nn.Module``.
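
For readers unfamiliar with ``torch.jit.script``, a minimal sketch of the decorator form mentioned in this docstring (the function below is a placeholder):

```python
import torch

@torch.jit.script
def scaled_sum(x: torch.Tensor, scale: float) -> torch.Tensor:
    # Compiled by TorchScript; `scaled_sum` is now a ScriptFunction.
    return x.sum() * scale

print(scaled_sum(torch.ones(3), 2.0))  # tensor(6.)
```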


@@ -1528,7 +1528,7 @@ class TripletMarginWithDistanceLoss(_Loss):
loss for input tensors using the :math:`l_p` distance as the distance function.
Args:
- distance_function (callable, optional): A nonnegative, real-valued function that
+ distance_function (Callable, optional): A nonnegative, real-valued function that
quantifies the closeness of two tensors. If not specified,
`nn.PairwiseDistance` will be used. Default: ``None``
margin (float, optional): A nonnegative margin representing the minimum difference


@@ -1324,7 +1324,7 @@ class DistributedDataParallel(Module, Joinable):
_BufferCommHookLocation.POST_FORWARD means that the
hook will run _after_ the forward pass.
- hook (callable): Callable with the following signature:
+ hook (Callable): Callable with the following signature:
``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``:
NOTE: To maximize performance, users can return a
@@ -1364,7 +1364,7 @@ class DistributedDataParallel(Module, Joinable):
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
- hook (callable): Callable with the following signature:
+ hook (Callable): Callable with the following signature:
``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``:
This function is called once the bucket is ready. The


@@ -79,7 +79,7 @@ class Adadelta(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -125,7 +125,7 @@ class Adagrad(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -108,7 +108,7 @@ class Adam(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()


@@ -83,7 +83,7 @@ class Adamax(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -109,7 +109,7 @@ class AdamW(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()


@@ -65,7 +65,7 @@ class ASGD(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -285,7 +285,7 @@ class LBFGS(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable): A closure that reevaluates the model
+ closure (Callable): A closure that reevaluates the model
and returns the loss.
"""
assert len(self.param_groups) == 1
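
Many of the hunks above touch the same ``closure`` argument of ``Optimizer.step``. As a hedged illustration of the pattern those docstrings describe (the model, data, and loss function below are placeholders; LBFGS is used because it actually requires a closure):

```python
import torch

model = torch.nn.Linear(4, 1)
inputs, targets = torch.randn(8, 4), torch.randn(8, 1)
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.LBFGS(model.parameters())

def closure():
    # Re-evaluates the model and returns the loss, as the docstrings require.
    optimizer.zero_grad()
    loss = loss_fn(model(inputs), targets)
    loss.backward()
    return loss

optimizer.step(closure)
```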


@@ -93,7 +93,7 @@ class NAdam(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -272,7 +272,7 @@ class Optimizer(object):
r"""Performs a single optimization step (parameter update).
Args:
- closure (callable): A closure that reevaluates the model and
+ closure (Callable): A closure that reevaluates the model and
returns the loss. Optional for most optimizers.
.. note::


@@ -91,7 +91,7 @@ class RAdam(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -97,7 +97,7 @@ class RMSprop(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -74,7 +74,7 @@ class Rprop(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -119,7 +119,7 @@ class SGD(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -58,7 +58,7 @@ class SparseAdam(Optimizer):
"""Performs a single optimization step.
Args:
- closure (callable, optional): A closure that reevaluates the model
+ closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None


@@ -281,9 +281,9 @@ class profile(_KinetoProfile):
activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
- schedule (callable): callable that takes step (int) as a single parameter and returns
+ schedule (Callable): callable that takes step (int) as a single parameter and returns
``ProfilerAction`` value that specifies the profiler action to perform at each step.
- on_trace_ready (callable): callable that is called at each step when ``schedule``
+ on_trace_ready (Callable): callable that is called at each step when ``schedule``
returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
record_shapes (bool): save information about operator's input shapes.
profile_memory (bool): track tensor memory allocation/deallocation.
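
A hedged sketch of how the ``schedule`` and ``on_trace_ready`` callables described above are typically wired together (the workload below is a placeholder):

```python
import torch
from torch.profiler import ProfilerActivity, profile, schedule

def trace_handler(prof):
    # Called whenever `schedule` returns ProfilerAction.RECORD_AND_SAVE.
    print(prof.key_averages().table(sort_by="cpu_time_total"))

with profile(
    activities=[ProfilerActivity.CPU],
    schedule=schedule(wait=1, warmup=1, active=2),
    on_trace_ready=trace_handler,
) as prof:
    for _ in range(8):
        torch.mm(torch.randn(64, 64), torch.randn(64, 64))  # placeholder workload
        prof.step()
```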


@@ -294,7 +294,7 @@ class parametrize(_TestParametrizer):
arg_str (str): String of arg names separate by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
- name_fn (callable): Optional function that takes in parameters and returns subtest name.
+ name_fn (Callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
self.arg_names: List[str] = [s.strip() for s in arg_str.split(',')]


@@ -146,7 +146,7 @@ class DataLoader(Generic[T_co]):
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
- collate_fn (callable, optional): merges a list of samples to form a
+ collate_fn (Callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset.
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
@@ -159,7 +159,7 @@ class DataLoader(Generic[T_co]):
will be smaller. (default: ``False``)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
- worker_init_fn (callable, optional): If not ``None``, this will be called on each
+ worker_init_fn (Callable, optional): If not ``None``, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: ``None``)
generator (torch.Generator, optional): If not ``None``, this RNG will be used