[ez] fix a bunch of typos in dynamo (#152886)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/152886
Approved by: https://github.com/williamwen42
Parent: 37c71820f3
Commit: e2eb845313
@@ -286,7 +286,7 @@ allow_unspec_int_on_nn_module = False

 # Specify how to optimize a compiled DDP module. The flag accepts a boolean
 # value or a string. There are 3 modes.
-# 1. "ddp_optimizer" (or True): with "ddp_ptimizer", Dynamo will automatically
+# 1. "ddp_optimizer" (or True): with "ddp_optimizer", Dynamo will automatically
 # split model graph into pieces to match DDP bucket sizes to allow DDP
 # comm/compute overlap.
 # 2. "python_reducer" (experimental): this optimization requires the usage
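For reference, the mode described in this comment is selected through torch._dynamo.config.optimize_ddp. A minimal usage sketch (the mode strings are taken from the comment above; defaults may differ across PyTorch versions):

import torch
import torch._dynamo

# Sketch: pick how Dynamo handles a DDP-wrapped module.
# True (or "ddp_optimizer") splits the graph to match DDP bucket sizes so
# communication can overlap with compute; "python_reducer" is the
# experimental alternative mentioned above.
torch._dynamo.config.optimize_ddp = "ddp_optimizer"

# model = torch.nn.parallel.DistributedDataParallel(model)
# compiled_model = torch.compile(model)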
@@ -2228,7 +2228,7 @@ class GuardBuilder(GuardBuilderBase):
 # But we deliberately take this soundness hit because this
 # usecase is quite rare and there is substantial reduction in
 # guard overhead.
-# For numpy tensors, since those are ephemeral, we dont have to
+# For numpy tensors, since those are ephemeral, we don't have to
 # insert aliasing guards on them
 if not (
     config.skip_no_tensor_aliasing_guards_on_parameters
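The flag referenced in this hunk trades soundness for speed: aliasing guards are skipped on parameters because the aliasing use case is rare and the guards are costly. A hedged sketch, assuming the flag keeps its current name:

import torch
import torch._dynamo

# The default skips tensor-aliasing guards on parameters for lower guard
# overhead; set the flag to False to restore the stricter guards.
torch._dynamo.config.skip_no_tensor_aliasing_guards_on_parameters = False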
@@ -3129,8 +3129,8 @@ def is_recompiles_verbose_enabled():


 # this will only be used if cpp guards are disabled
-def make_torch_function_mode_stack_guard(intial_stack):
-    types = [type(x) for x in intial_stack]
+def make_torch_function_mode_stack_guard(initial_stack):
+    types = [type(x) for x in initial_stack]

     def check_torch_function_mode_stack():
         cur_stack = get_torch_function_mode_stack()
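The guard renamed here is a closure over the torch-function mode stack observed at compile time. A simplified standalone sketch of the same pattern (get_current_stack stands in for dynamo's get_torch_function_mode_stack; this is an illustration, not the actual implementation):

from typing import Callable, Sequence

def make_stack_guard(initial_stack: Sequence[object],
                     get_current_stack: Callable[[], Sequence[object]]):
    # Record the types present on the stack when the guard is created.
    types = [type(x) for x in initial_stack]

    def check_stack() -> bool:
        # Pass only if the current stack has the same length and the
        # same types, in the same order, as the recorded stack.
        cur_stack = get_current_stack()
        return len(cur_stack) == len(types) and all(
            type(x) is t for x, t in zip(cur_stack, types)
        )

    return check_stack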
@@ -2634,7 +2634,7 @@ class SubgraphTracer(fx.Tracer):
 self, example_value: Union[torch.SymInt, torch.Tensor], src: Optional[Source]
 ):
 # The before arg is for inserting symints in the sizes/strides of a tensor
-# before the tensor. This odering ensures that when we look at the tensor's
+# before the tensor. This ordering ensures that when we look at the tensor's
 # symbols, they're already lifted/tracked. E.g. this assumption is used
 # in insert_deferred_runtime_asserts.
 def _lift_symbols_in_symint(
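The comment fixed here documents an ordering invariant: symbols appearing in a tensor's sizes/strides are lifted before the tensor itself. A heavily simplified, hypothetical sketch of that ordering (lift_symbol and lift_tensor are placeholder callbacks, not dynamo APIs):

import torch

def lift_tensor_after_its_symbols(t, lift_symbol, lift_tensor):
    # Handle any symbolic sizes/strides first ...
    for s in (*t.size(), *t.stride()):
        if isinstance(s, torch.SymInt):
            lift_symbol(s)
    # ... and only then the tensor, so later passes (e.g.
    # insert_deferred_runtime_asserts) can assume its symbols are tracked.
    lift_tensor(t)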
@@ -675,7 +675,7 @@ class VariableBuilder:
 self.install_guards(GuardBuilder.TYPE_MATCH)
 all_const = all(ConstantVariable.is_literal(k) for k in value.keys())

-# For all_const, we dont have to guard on anything yet. We guard on
+# For all_const, we don't have to guard on anything yet. We guard on
 # keys lazily by adding a dict_getitem entry for each accessed key.
 # For cases where we need to guard on all keys, we lazily put guards
 # during the dict call_method (check dicts.py)
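In user-facing terms, the lazy key guards described above mean a compiled function that reads only some keys of a constant-keyed dict is guarded only on the keys it touches. A small usage sketch (the guard behaviour is taken from the comment above; no new API is involved):

import torch

@torch.compile
def scale(cfg, x):
    # Only "scale" is read, so only that key needs a dict_getitem guard.
    return x * cfg["scale"]

out = scale({"scale": 2.0, "unused": "ignored"}, torch.randn(4))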
@@ -1728,7 +1728,7 @@ class VariableBuilder:

 def wrap_literal(self, value):
     if type(value) is int:
-        # allowlist has higher precendence over specialization control.
+        # allowlist has higher precedence over specialization control.
         if is_dynamic_source(self.source.name()):
             return self.wrap_symint(value, True)

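The comment fixed here says the dynamic-source allowlist wins over specialization control: an int whose source is allowlisted is wrapped as a symbolic int even where Dynamo would otherwise specialize it. A hedged sketch; the TORCH_COMPILE_DYNAMIC_SOURCES environment variable and the L['...'] source syntax are assumptions that may differ across versions:

import os

# Assumption: the allowlist is populated from this environment variable,
# and local arguments are referred to with the L['...'] source syntax.
os.environ["TORCH_COMPILE_DYNAMIC_SOURCES"] = "L['batch_size']"

import torch

@torch.compile
def head(x, batch_size):
    # With the source allowlisted, batch_size is treated as dynamic
    # instead of being specialized to a constant.
    return x[:batch_size]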