From 8701f18bc05d3b36523a32b7f98c49d9d1de0b02 Mon Sep 17 00:00:00 2001
From: Fabian
Date: Mon, 29 Sep 2025 17:43:56 +0000
Subject: [PATCH] Adjust ...mark_unbacked() -> ...decorators.mark_unbacked()
 in logs. (#164131)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164131
Approved by: https://github.com/albanD, https://github.com/Skylion007
---
 docs/source/torch.compiler_dynamic_shapes.md | 2 +-
 test/dynamo/test_logging.py                  | 4 ++--
 torch/fx/experimental/symbolic_shapes.py     | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/source/torch.compiler_dynamic_shapes.md b/docs/source/torch.compiler_dynamic_shapes.md
index 4905eefacc1..89db12c29da 100644
--- a/docs/source/torch.compiler_dynamic_shapes.md
+++ b/docs/source/torch.compiler_dynamic_shapes.md
@@ -216,7 +216,7 @@ you never specialize.
 
 #### `mark_unbacked(tensor, dim)`
 
-The {func}`torch._dynamo.mark_unbacked` function marks a tensor dimension as unbacked. It is unlikely
+The {func}`torch._dynamo.decorators.mark_unbacked` function marks a tensor dimension as unbacked. It is unlikely
 to be the tool you need, but it could be useful if the specialization occurs inside a condition
 `guard_size_oblivious(x)`, and if using it removes the specialization. Ensure it fixes the
 specialization and does not introduce a data-dependent error
diff --git a/test/dynamo/test_logging.py b/test/dynamo/test_logging.py
index b018c7565dd..5f6704b8a39 100644
--- a/test/dynamo/test_logging.py
+++ b/test/dynamo/test_logging.py
@@ -733,7 +733,7 @@ TRACE FX call mul from test_logging.py:N in fn (LoggingTests.test_trace_call_pre
 +- __SHAPE_GUARD__: L['x'].size()[0] == 2*L['y'].size()[0] # return x + torch.cat([y, z]) # #:# in # #:# in #
 +- __SHAPE_GUARD__: L['z'].size()[0] == L['y'].size()[0] # duck sizing added this equality because these variables had the same size 3 (to avoid this specialization, set torch.fx.experimental._config.use_duck_shape = False)
 +- __SHAPE_GUARD__: ((2*L['y'].size()[0]) % 3) == 0 # if x.size(0) % 3 == 0: # #:# in # #:# in #
-+- __SHAPE_GUARD__: 2 <= L['y'].size()[0] # return x + torch.cat([y, z]) # #:# in # (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim))""",  # noqa: B950
++- __SHAPE_GUARD__: 2 <= L['y'].size()[0] # return x + torch.cat([y, z]) # #:# in # (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.decorators.mark_unbacked(tensor, dim))""",  # noqa: B950
         )
 
     @make_logging_test(guards=True)
@@ -749,7 +749,7 @@ TRACE FX call mul from test_logging.py:N in fn (LoggingTests.test_trace_call_pre
             munge_shape_guards(record.getMessage()),
             """\
 +- __SHAPE_GUARD__: L['x'].size()[0] == 2*L['y'].size()[0] # return any([x.size(0) == y.size(0) * 2]) # #:# in # #:# in #
-+- __SHAPE_GUARD__: 2 <= L['y'].size()[0] # return any([x.size(0) == y.size(0) * 2]) # #:# in # (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.mark_unbacked(tensor, dim))""",  # noqa: B950
++- __SHAPE_GUARD__: 2 <= L['y'].size()[0] # return any([x.size(0) == y.size(0) * 2]) # #:# in # (user code shown is first use of this value--the guard itself is not due user code but due to 0/1 specialization in the framework; to avoid specialization try torch._dynamo.decorators.mark_unbacked(tensor, dim))""",  # noqa: B950
         )
 
     @make_logging_test(guards=True)
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index c7833d1c99d..e752662f662 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -5077,7 +5077,7 @@ class ShapeEnv:
                 self._get_sloc(
                     "user code shown is first use of this value--the guard itself is not "
                     "due user code but due to 0/1 specialization in the framework; to "
-                    "avoid specialization try torch._dynamo.mark_unbacked(tensor, dim)"
+                    "avoid specialization try torch._dynamo.decorators.mark_unbacked(tensor, dim)"
                     if self.specialize_zero_one
                     else None
                 ),
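
For reference, a minimal sketch of how the decorator path referenced by the updated docs, tests, and log message might be used. The toy function, tensor shape, and call below are illustrative assumptions and are not part of this patch.

    import torch
    import torch._dynamo.decorators


    def fn(x):
        # When x.size(0) is traced as a dynamic size, the framework normally
        # 0/1-specializes it and installs a guard like "2 <= L['x'].size()[0]".
        return x * 2


    x = torch.randn(4, 3)
    # Treat dim 0 of x as unbacked so that no 0/1-specialization guard is
    # installed for it; this is the path the updated message points at.
    torch._dynamo.decorators.mark_unbacked(x, 0)

    out = torch.compile(fn)(x)

As the docs hunk above notes, this only helps when the guard exists purely because of 0/1 specialization; verify that it removes the specialization without introducing a data-dependent error in the compiled region.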