Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00
Change 'w.r.t.' to 'wrt' in function docstrings to fix doc rendering (#100028)
Fixes #72428 according to the decision reached in the comments. I've left other instances of `w.r.t.` intact (e.g. in parameter/return descriptions, in comments, etc.) because there were many, and I didn't want to go out of scope. That being said, I'm happy to change those as well if we'd prefer the consistency! I've also fixed a typo that I came across while grepping for instances. Will update with screenshots once docs are built.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/100028
Approved by: https://github.com/albanD
This commit is contained in:
parent 676a23f452
commit bafa2c4724
@@ -429,7 +429,7 @@ class Tensor(torch._C._TensorBase):
     def backward(
         self, gradient=None, retain_graph=None, create_graph=False, inputs=None
     ):
-        r"""Computes the gradient of current tensor w.r.t. graph leaves.
+        r"""Computes the gradient of current tensor wrt graph leaves.

         The graph is differentiated using the chain rule. If the tensor is
         non-scalar (i.e. its data has more than one element) and requires
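For context, a minimal usage sketch of the behavior this docstring describes (illustrative only, not part of the commit): a scalar result can call backward() with no arguments, while a non-scalar result needs an explicit gradient argument.

import torch

x = torch.randn(3, requires_grad=True)

# Scalar case: backward() needs no arguments.
y = (x * 2).sum()
y.backward()
print(x.grad)                            # tensor([2., 2., 2.])

# Non-scalar case: supply a `gradient` tensor of matching shape.
x.grad = None                            # reset the accumulated gradient
z = x * 2
z.backward(gradient=torch.ones_like(z))
print(x.grad)                            # tensor([2., 2., 2.])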
@@ -375,7 +375,7 @@ class _SingleLevelFunction(_C._FunctionBase, FunctionCtx, _HookMixin, metaclass=
         pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
         of booleans representing whether each input needs gradient. E.g.,
         :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
-        first input to :func:`forward` needs gradient computated w.r.t. the
+        first input to :func:`forward` needs gradient computed w.r.t. the
         output.
         """
         raise NotImplementedError("You must implement either the backward or vjp method for "
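For context, a minimal sketch (illustrative only, not part of the commit) of how ctx.needs_input_grad is typically consulted in a custom autograd Function; the Scale class here is a hypothetical example, not PyTorch code.

import torch

class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inp, factor):
        ctx.factor = factor
        return inp * factor

    @staticmethod
    def backward(ctx, grad_output):
        grad_inp = None
        # Only compute the gradient for inputs that actually need it.
        if ctx.needs_input_grad[0]:
            grad_inp = grad_output * ctx.factor
        # `factor` is a plain Python number, so its gradient slot is None.
        return grad_inp, None

x = torch.randn(4, requires_grad=True)
Scale.apply(x, 3.0).sum().backward()
print(x.grad)  # tensor([3., 3., 3., 3.])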
@@ -1449,7 +1449,7 @@ def gradcheck(
     masked: Optional[bool] = None,
 ) -> bool:
     r"""Check gradients computed via small finite differences against analytical
-    gradients w.r.t. tensors in :attr:`inputs` that are of floating point or complex type
+    gradients wrt tensors in :attr:`inputs` that are of floating point or complex type
     and with ``requires_grad=True``.

     The check between numerical and analytical gradients uses :func:`~torch.allclose`.
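For context, a minimal illustrative gradcheck call (not part of the commit), using double-precision inputs with requires_grad=True as the docstring requires; fn is a hypothetical example function.

import torch
from torch.autograd import gradcheck

def fn(a, b):
    return (a * b).sum()

# gradcheck expects float64 (or complex) inputs with requires_grad=True.
inputs = (
    torch.randn(3, dtype=torch.double, requires_grad=True),
    torch.randn(3, dtype=torch.double, requires_grad=True),
)
print(gradcheck(fn, inputs))  # True if numerical and analytical gradients match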
@@ -1606,7 +1606,7 @@ def gradgradcheck(
     masked: bool = False,
 ) -> bool:
     r"""Check gradients of gradients computed via small finite differences
-    against analytical gradients w.r.t. tensors in :attr:`inputs` and
+    against analytical gradients wrt tensors in :attr:`inputs` and
     :attr:`grad_outputs` that are of floating point or complex type and with
     ``requires_grad=True``.

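For context, a matching illustrative gradgradcheck call (not part of the commit); fn and the explicit grad_outputs tensor are assumptions for the example.

import torch
from torch.autograd import gradgradcheck

def fn(a):
    return (a ** 3).sum()

a = torch.randn(3, dtype=torch.double, requires_grad=True)
# grad_outputs must also be double precision with requires_grad=True,
# with the same shape as the (scalar) output of fn.
grad_out = torch.randn((), dtype=torch.double, requires_grad=True)
print(gradgradcheck(fn, (a,), (grad_out,)))  # True if second derivatives match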