From bafa2c4724bb4935612d3a94e295a9f4bf57e5d1 Mon Sep 17 00:00:00 2001
From: Kiersten Stokes
Date: Tue, 25 Apr 2023 23:53:22 +0000
Subject: [PATCH] Change 'w.r.t.' to 'wrt' in function docstrings to fix doc
 rendering (#100028)

Fixes #72428 according to decision reached in comments. I've left other
instances of `w.r.t.` intact (e.g. in parameter/return descriptions, in
comments, etc.) because there were many, and I didn't want to go
out-of-scope. That being said, I'm happy to change those as well if we'd
prefer the consistency!

I've also fixed a typo that I came across while grepping for instances.

Will update with screenshots once docs are built.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/100028
Approved by: https://github.com/albanD
---
 torch/_tensor.py            | 2 +-
 torch/autograd/function.py  | 2 +-
 torch/autograd/gradcheck.py | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/torch/_tensor.py b/torch/_tensor.py
index 0007be9d27e..ccebfc5c441 100644
--- a/torch/_tensor.py
+++ b/torch/_tensor.py
@@ -429,7 +429,7 @@ class Tensor(torch._C._TensorBase):
     def backward(
         self, gradient=None, retain_graph=None, create_graph=False, inputs=None
     ):
-        r"""Computes the gradient of current tensor w.r.t. graph leaves.
+        r"""Computes the gradient of current tensor wrt graph leaves.
 
         The graph is differentiated using the chain rule. If the tensor is
         non-scalar (i.e. its data has more than one element) and requires
diff --git a/torch/autograd/function.py b/torch/autograd/function.py
index 880ef803ea3..c9f4529ed96 100644
--- a/torch/autograd/function.py
+++ b/torch/autograd/function.py
@@ -375,7 +375,7 @@ class _SingleLevelFunction(_C._FunctionBase, FunctionCtx, _HookMixin, metaclass=
         pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
         of booleans representing whether each input needs gradient. E.g.,
         :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
-        first input to :func:`forward` needs gradient computated w.r.t. the
+        first input to :func:`forward` needs gradient computed w.r.t. the
         output.
         """
         raise NotImplementedError("You must implement either the backward or vjp method for "
diff --git a/torch/autograd/gradcheck.py b/torch/autograd/gradcheck.py
index 9a7a8698cff..e2382ed2962 100644
--- a/torch/autograd/gradcheck.py
+++ b/torch/autograd/gradcheck.py
@@ -1449,7 +1449,7 @@ def gradcheck(
     masked: Optional[bool] = None,
 ) -> bool:
     r"""Check gradients computed via small finite differences against analytical
-    gradients w.r.t. tensors in :attr:`inputs` that are of floating point or complex type
+    gradients wrt tensors in :attr:`inputs` that are of floating point or complex type
     and with ``requires_grad=True``.
 
     The check between numerical and analytical gradients uses :func:`~torch.allclose`.
@@ -1606,7 +1606,7 @@ def gradgradcheck(
     masked: bool = False,
 ) -> bool:
     r"""Check gradients of gradients computed via small finite differences
-    against analytical gradients w.r.t. tensors in :attr:`inputs` and
+    against analytical gradients wrt tensors in :attr:`inputs` and
    :attr:`grad_outputs` that are of floating point or complex type and
     with ``requires_grad=True``.
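
The docstrings touched above belong to `Tensor.backward`, `torch.autograd.gradcheck`, and `torch.autograd.gradgradcheck`. For context, here is a minimal sketch of how those functions are typically exercised; the function `f`, the tensor shapes, and the tolerances below are illustrative assumptions, not values taken from this patch.

```python
import torch
from torch.autograd import gradcheck, gradgradcheck

# Double-precision leaf tensors with requires_grad=True, as the gradcheck
# docstring asks for when comparing numerical and analytical gradients.
x = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
w = torch.randn(4, 2, dtype=torch.double, requires_grad=True)

def f(x, w):
    return (x @ w).sin()

# Compare analytical gradients against small finite differences wrt the inputs.
assert gradcheck(f, (x, w), eps=1e-6, atol=1e-4)

# Same idea one order higher: gradients of gradients.
assert gradgradcheck(f, (x, w))

# Tensor.backward computes the gradient of the scalar result wrt graph leaves.
loss = f(x, w).sum()
loss.backward()
print(x.grad.shape, w.grad.shape)  # torch.Size([3, 4]) torch.Size([4, 2])
```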