Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 12:20:52 +01:00
Add an additional example showcasing the usage of torch.autograd.functional.jacobian (#155683)
Fixes #132140. As described in the issue, I've added an example that showcases the use of higher-dimensional inputs and outputs, batched inputs, and `vectorize=True` with `torch.autograd.functional.jacobian`. Could you please review?

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155683
Approved by: https://github.com/soulitzer
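For context, here is a minimal standalone sketch of the behaviour the new docstring example documents: `jacobian` returns a tensor of shape `output.shape + input.shape`, and `vectorize=True` computes it in a single vectorized pass over the output elements rather than one autograd call per element (per the PyTorch docs, this option is experimental). The function `f` below is illustrative, not code from the PR.

import torch
from torch.autograd.functional import jacobian

def f(x):
    # Illustrative higher-dimensional map: (2, 3) input -> (3, 2) output.
    return 2.0 * x.T

x = torch.randn(2, 3)
jac = jacobian(f, x, vectorize=True)
print(jac.shape)  # torch.Size([3, 2, 2, 3]) == output.shape + input.shape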
This commit is contained in:
parent: e6d71f3789
commit: 093fd47dbe
@@ -653,6 +653,16 @@ def jacobian(
                  [0.0000, 3.3963]]),
          tensor([[3., 0.],
                  [0., 3.]]))
+
+        >>> def linear_model(x):
+        ...     W = torch.tensor([[2.0, -1.0], [0.0, 1.0]])
+        ...     b = torch.tensor([1.0, 0.5])
+        ...     return x @ W.T + b
+
+        >>> x = torch.randn(4, 2, requires_grad=True)
+        >>> jac = jacobian(linear_model, x, vectorize=True)
+        >>> jac.shape
+        torch.Size([4, 2, 4, 2])
     """
     assert strategy in ("forward-mode", "reverse-mode"), (
         'Expected strategy to be either "forward-mode" or "reverse-mode". Hint: If your '
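A note on the `torch.Size([4, 2, 4, 2])` result: `jacobian` treats the whole (4, 2) tensor as a single input, so the returned tensor has shape `output.shape + input.shape`, and `jac[i, :, j, :]` holds the 2x2 block of derivatives of output row i with respect to input row j. Since `linear_model` acts on each row independently, those blocks are zero whenever i != j. The sketch below reuses the docstring's `linear_model`; the `per_sample` extraction is illustrative, not part of the PR.

import torch
from torch.autograd.functional import jacobian

def linear_model(x):
    W = torch.tensor([[2.0, -1.0], [0.0, 1.0]])
    b = torch.tensor([1.0, 0.5])
    return x @ W.T + b

x = torch.randn(4, 2, requires_grad=True)
jac = jacobian(linear_model, x, vectorize=True)  # shape (4, 2, 4, 2)

# Off-diagonal batch blocks are zero because output row i depends only
# on input row i.
print(torch.count_nonzero(jac[0, :, 1, :]))      # tensor(0)

# Take the diagonal over the two batch dimensions (0 and 2) to recover
# the per-sample 2x2 Jacobians; each block equals W for this affine map.
per_sample = jac.diagonal(dim1=0, dim2=2).permute(2, 0, 1)  # shape (4, 2, 2)
W = torch.tensor([[2.0, -1.0], [0.0, 1.0]])
print(torch.allclose(per_sample, W.expand(4, 2, 2)))        # True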