diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py
index 1e4f227704b..80c2e602534 100644
--- a/torch/_dynamo/eval_frame.py
+++ b/torch/_dynamo/eval_frame.py
@@ -2103,11 +2103,10 @@ def export(
                 )
                 and not trace_rules.check(call_to_inspect)
             ):
-                # pyrefly: ignore # unbound-name
                 dim_constraints.solve()
-                # pyrefly: ignore # unbound-name
+
                 forced_specializations = dim_constraints.forced_specializations()
-                # pyrefly: ignore # unbound-name
+
                 msg = dim_constraints.prettify_results(
                     original_signature,
                     dynamic_shapes,
@@ -2128,11 +2127,10 @@
                 )

             # Error if we have any constraints on static values
-            # pyrefly: ignore # unbound-name
+
             for k in shape_env.var_to_range.keys():
                 if isinstance(k, sympy.Integer):
                     constraint_violation_error = ConstraintViolationError(
-                        # pyrefly: ignore # unbound-name
                         f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
                         "It appears that you're trying to set a constraint on a "
                         f"value which we evaluated to have a static value of {k}. "
diff --git a/torch/_dynamo/functional_export.py b/torch/_dynamo/functional_export.py
index 9efdf4f46d6..3a85f50b0fb 100644
--- a/torch/_dynamo/functional_export.py
+++ b/torch/_dynamo/functional_export.py
@@ -408,11 +408,10 @@ def _suggest_or_raise_constraint_violation(
                 torch._ops.OpOverloadPacket | torch._ops.OpOverload,
             )
         ):
-            # pyrefly: ignore # unbound-name
             dim_constraints.solve()
-            # pyrefly: ignore # unbound-name
+
            forced_specializations = dim_constraints.forced_specializations()
-            # pyrefly: ignore # unbound-name
+
            msg = dim_constraints.prettify_results(
                inspect.signature(orig_callable),  # type: ignore[attr-defined]
                dynamic_shapes,
@@ -433,11 +432,10 @@
            )

        # Error if we have any constraints on static values
-        # pyrefly: ignore # unbound-name
+
        for k in shape_env.var_to_range.keys():
            if isinstance(k, sympy.Integer):
                constraint_violation_error = ConstraintViolationError(
-                    # pyrefly: ignore # unbound-name
                    f"{''.join(traceback.format_list(shape_env.var_to_stack[k]))}\n"
                    "It appears that you're trying to set a constraint on a "
                    f"value which we evaluated to have a static value of {k}. "
diff --git a/torch/_dynamo/graph_deduplication.py b/torch/_dynamo/graph_deduplication.py
index 5c3bcdb67f4..1ff0777257b 100644
--- a/torch/_dynamo/graph_deduplication.py
+++ b/torch/_dynamo/graph_deduplication.py
@@ -456,8 +456,10 @@ def _add_mutation_dependencies(
        for user in mutated_arg.users:
            if user is node:
                continue
+            # pyrefly: ignore # unsupported-operation
            elif user < node:
                node_to_additional_deps[node].add(user)
+            # pyrefly: ignore # unsupported-operation
            elif user > node:
                node_to_additional_deps[user].add(node)
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index b17e9439e3d..9a33dfc8bb2 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -4100,13 +4100,12 @@ class CheckFunctionManager:
            and (cache_entry := self.guard_manager.cache_entry) is not None
            and (extra_state := self.guard_manager.extra_state) is not None
        ):
-            # pyrefly: ignore # unbound-name
            assert isinstance(cache_entry, CacheEntry)
-            # pyrefly: ignore # unbound-name
+
            assert isinstance(extra_state, ExtraState)
            reason = f"Cache line invalidated because {obj_str} got deallocated"
            deleted_guard_manager = DeletedGuardManagerWrapper(reason)
-            # pyrefly: ignore # unbound-name
+
            extra_state.invalidate(cache_entry, deleted_guard_manager)
            self.guard_manager = deleted_guard_manager
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index f4a834dfe5e..f2b084986ab 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -2048,9 +2048,8 @@ class OutputGraph(OutputGraphCommon):
        tx = self.root_tx
        assert tx is not None
        if (ds := tx.distributed_state) is not None and ds.all_states is None:
-            # pyrefly: ignore # unbound-name
            compile_pg = ds.compile_pg
-            # pyrefly: ignore # unbound-name
+
            log.info("compiler_collective %s", ds.local_state)
            torch._logging.trace_structured(
                "artifact",
@@ -2058,7 +2057,6 @@ class OutputGraph(OutputGraphCommon):
                    "name": "compiler_collective",
                    "encoding": "string",
                },
-                # pyrefly: ignore # unbound-name
                payload_fn=lambda: ds.local_state.render(),
            )
            device_types = compile_pg._device_types
@@ -2072,9 +2070,9 @@ class OutputGraph(OutputGraphCommon):
                dynamo_timed("compiler_collective", log_pt2_compile_event=True),
            ):
                all_states: list[Any] = [None] * compile_pg.size()
-                # pyrefly: ignore # unbound-name
+
                dist.all_gather_object(all_states, ds.local_state, group=compile_pg)
-                # pyrefly: ignore # unbound-name
+
                ds.all_states = all_states
            # Clear speculation log, because are tracing may diverge due to
            # this information from the compiler collective
@@ -2468,7 +2466,6 @@ class OutputGraph(OutputGraphCommon):
            isinstance(b, torch.SymBool)
            and (r := b.node.maybe_as_bool()) is not None
        ):
-            # pyrefly: ignore # unbound-name
            return r
        # TODO: We can also technically remove all cases when the input
        # doesn't have unbacked inputs, since it's all in the ShapeEnv
diff --git a/torch/_dynamo/pgo.py b/torch/_dynamo/pgo.py
index 89a73de2133..0dc7c7a9069 100644
--- a/torch/_dynamo/pgo.py
+++ b/torch/_dynamo/pgo.py
@@ -876,7 +876,6 @@ def get_code_state() -> defaultdict[CodeId, CodeState]:
        not _CODE_STATE
        and (sticky_read := torch.compiler.config.pgo_extra_read_key) is not None
    ):
-        # pyrefly: ignore # unbound-name
        extra_read_key = get_extra_cache_key(sticky_read)
        if extra_read_key is not None:
            get_extra_remote_code_state(extra_read_key)
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index f98f0172dc9..57b351fe867 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -4410,7 +4410,6 @@ class InstructionTranslator(InstructionTranslatorBase):
            and isinstance(tos, LocalGeneratorObjectVariable)
        ):
            self.stack[-1] = ListIteratorVariable(
-                # pyrefly: ignore # unbound-name
                tos.force_unpack_var_sequence(self),
                mutation_type=ValueMutationNew(),
            )
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index ede0d796cfe..f7955193f32 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -4214,7 +4214,6 @@ def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]:
        # (x) + (y)
        # ~~^~~~~~~
        while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#":
-            # pyrefly: ignore # unbound-name
            if ch in "\\#":
                cur_lineno, cur_col = nextline(cur_lineno, cur_col)
            else:
diff --git a/torch/_export/pass_base.py b/torch/_export/pass_base.py
index b65df30103e..2fb47c0c7d3 100644
--- a/torch/_export/pass_base.py
+++ b/torch/_export/pass_base.py
@@ -317,7 +317,6 @@ class _ExportPassBaseDeprecatedDoNotUse(PassBase):
            )
            res_proxy.node.meta.update(meta.data)
            if self.fake_tensor_mode and (shape_env := self.fake_tensor_mode.shape_env):
-                # pyrefly: ignore # unbound-name
                if symbol_to_path := compute_unbacked_bindings(shape_env, res_data):
                    res_proxy.node.meta["unbacked_bindings"] = symbol_to_path
            self.tracer.set_metadata(res_proxy.node, res_data)
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index 26299281f04..a8994e55072 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -2183,7 +2183,6 @@ class GraphModuleDeserializer(metaclass=Final):
                    simplify=True,
                )
            ):
-                # pyrefly: ignore # unbound-name
                node.meta["unbacked_bindings"] = unbacked_bindings

        assert len(self.unbacked_symbols) == 0
diff --git a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
index 14409e36dc0..19c55c16615 100644
--- a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
+++ b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -204,7 +204,6 @@ def run_functionalized_fw_and_collect_metadata(
        suppress_pending = contextlib.nullcontext()
        fake_mode = detect_fake_mode()
        if fake_mode and (shape_env := fake_mode.shape_env):
-            # pyrefly: ignore # unbound-name
            suppress_pending = shape_env.ignore_fresh_unbacked_symbols()
        with disable_above, mode, suppress_pending:
            # precondition: The passed in function already handles unflattening inputs + flattening outputs
diff --git a/torch/_higher_order_ops/while_loop.py b/torch/_higher_order_ops/while_loop.py
index 9cf9fbd0f56..0543ed2b107 100644
--- a/torch/_higher_order_ops/while_loop.py
+++ b/torch/_higher_order_ops/while_loop.py
@@ -746,7 +746,6 @@ class WhileLoopAutogradOp(torch.autograd.Function):
            and (shape_env := loop_count.node.shape_env)
            and loop_count in shape_env.pending_fresh_unbacked_symbols
        ):
-            # pyrefly: ignore # unbound-name
            shape_env.pending_fresh_unbacked_symbols.remove(loop_count)

        # Even when body function is not executed, we clone and unsqueeze the input
diff --git a/torch/_inductor/__init__.py b/torch/_inductor/__init__.py
index a49b64a28cd..896eba08901 100644
--- a/torch/_inductor/__init__.py
+++ b/torch/_inductor/__init__.py
@@ -132,7 +132,6 @@ def aoti_compile_and_package(
        )
        or (
            isinstance(package_path, (str, os.PathLike))
-            # pyrefly: ignore # no-matching-overload
            and os.fspath(package_path).endswith(".pt2")
        )
    ), (
diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py
index 85ea0a79d5f..a504b54f132 100644
--- a/torch/_inductor/autotune_process.py
+++ b/torch/_inductor/autotune_process.py
@@ -557,7 +557,6 @@ class GPUDeviceBenchmarkMixin:
        res = benchmarker.benchmark_gpu(fn)
        device_interface.synchronize()  # shake out any CUDA errors
-        # pyrefly: ignore # bad-return
        return res
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 5a953f80a1a..a125a0234ba 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -1737,7 +1737,6 @@ class KernelArgs:
                )
            )
        for outer, inner in chain(
-            # pyrefly: ignore # bad-argument-type
            self.input_buffers.items(),
            # pyrefly: ignore # bad-argument-type
            self.output_buffers.items(),
diff --git a/torch/_inductor/codegen/cpp_gemm_template.py b/torch/_inductor/codegen/cpp_gemm_template.py
index cb17b5a7deb..48a4511cb8f 100644
--- a/torch/_inductor/codegen/cpp_gemm_template.py
+++ b/torch/_inductor/codegen/cpp_gemm_template.py
@@ -1478,7 +1478,6 @@ class CppGemmTemplate(CppTemplate):
            assert isinstance(template_buffer, ir.IRNode)
            gemm_output_name = f"{template_buffer.get_name()}_GemmOut"
            gemm_output_buffer = ir.Buffer(
-                # pyrefly: ignore # missing-attribute
                name=gemm_output_name,
                # pyrefly: ignore # missing-attribute
                layout=template_buffer.layout,
@@ -1502,7 +1501,6 @@ class CppGemmTemplate(CppTemplate):
                    reindexers.append(None)
                    if i < len(epilogue_creators) - 1:
                        current_input_buffer = ir.Buffer(
-                            # pyrefly: ignore # missing-attribute
                            name=buffer_name,
                            # pyrefly: ignore # missing-attribute
                            layout=template_buffer.layout,
diff --git a/torch/_inductor/codegen/cpp_wrapper_gpu.py b/torch/_inductor/codegen/cpp_wrapper_gpu.py
index dd4a3a984d3..011979b0638 100644
--- a/torch/_inductor/codegen/cpp_wrapper_gpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_gpu.py
@@ -822,7 +822,6 @@ class CppWrapperGpu(CppWrapperCpu):
        if triton:
            call_args, arg_types = self.prepare_triton_wrapper_args(
-                # pyrefly: ignore # bad-argument-type
                call_args,
                # pyrefly: ignore # bad-argument-type
                arg_types,
diff --git a/torch/_inductor/codegen/mps.py b/torch/_inductor/codegen/mps.py
index fb3939531b7..55a7d6eb458 100644
--- a/torch/_inductor/codegen/mps.py
+++ b/torch/_inductor/codegen/mps.py
@@ -680,7 +680,6 @@ class MetalKernel(SIMDKernel):
            )
            idx_val = self._new_idxvar(dtype, default_value=0, is_threadgroup=False)  # type: ignore[assignment]
            idx_var = next(
-                # pyrefly: ignore # missing-argument
                t
                for t in self.range_tree_nodes.values()
                # pyrefly: ignore # missing-argument
@@ -863,7 +862,6 @@ class MetalKernel(SIMDKernel):
        if self.inside_reduction:
            total_reduction_size = math.prod(
-                # pyrefly: ignore # missing-argument
                t.numel
                for t in self.range_trees
                # pyrefly: ignore # missing-argument
diff --git a/torch/_inductor/codegen/simd.py b/torch/_inductor/codegen/simd.py
index 79d0b603220..fce9c017957 100644
--- a/torch/_inductor/codegen/simd.py
+++ b/torch/_inductor/codegen/simd.py
@@ -965,7 +965,6 @@ class SIMDKernel(Kernel[CSEVariableType], Generic[CSEVariableType]):
    def active_range_trees(self) -> list[IterationRangesRoot]:
        return [
-            # pyrefly: ignore # missing-argument
            t
            for t in self.range_trees
            # pyrefly: ignore # missing-argument
diff --git a/torch/_inductor/codegen/wrapper_fxir.py b/torch/_inductor/codegen/wrapper_fxir.py
index 56fd28b828a..87e1124fd29 100644
--- a/torch/_inductor/codegen/wrapper_fxir.py
+++ b/torch/_inductor/codegen/wrapper_fxir.py
@@ -1036,7 +1036,6 @@ class FxConverter:
        # Add constants stored as Triton metadata, in signature order.
        call_kwargs |= constants
        new_call_args = [
-            # pyrefly: ignore # missing-attribute
            call_kwargs[key]
            for key in signature
            # pyrefly: ignore # missing-attribute
diff --git a/torch/_inductor/comms.py b/torch/_inductor/comms.py
index 3cf0156e043..043252610ae 100644
--- a/torch/_inductor/comms.py
+++ b/torch/_inductor/comms.py
@@ -826,11 +826,10 @@ def _schedule_for_comm(
            collective_cost > 0
            and (candidate := get_overlapping_candidate()) is not None
        ):
-            # pyrefly: ignore # unbound-name
            ready.remove(candidate)
-            # pyrefly: ignore # unbound-name
+
            schedule(candidate.snode)
-            # pyrefly: ignore # unbound-name
+
            collective_cost -= snode_to_cost[candidate.snode]
        heapq.heapify(ready)
@@ -1098,7 +1097,7 @@ def _sink_waits_iterative_internal(
                    info.grouped_info = _group_names(gns)
                    candidate = _next[candidate]
                    continue
-                # pyrefly: ignore # unbound-name
+
                elif (data_dep is None) and both_contain_comms:
                    info.limiting_factor = (
                        f"collective ordering {_group_names(gns)}"
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index eba5da82c62..85c7f2884eb 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -271,7 +271,6 @@ def record_original_output_strides(gm: GraphModule) -> None:
            and (val := output.meta.get("val")) is not None
            and isinstance(val, torch.Tensor)
        ):
-            # pyrefly: ignore # unbound-name
            output_strides.append(val.stride())
        else:
            # pyrefly: ignore # bad-argument-type
diff --git a/torch/_inductor/compile_fx_ext.py b/torch/_inductor/compile_fx_ext.py
index 7e3c5bb7cfa..9c032fafb96 100644
--- a/torch/_inductor/compile_fx_ext.py
+++ b/torch/_inductor/compile_fx_ext.py
@@ -620,7 +620,6 @@ class _OutOfProcessFxCompile(_SerializedFxCompile):
        if output.warning_replay:
            for w in output.warning_replay:
-                # pyrefly: ignore # no-matching-overload
                warnings.warn_explicit(
                    message=w.message,
                    category=w.category,
diff --git a/torch/_inductor/decomposition.py b/torch/_inductor/decomposition.py
index 18e338137bd..f137ab297a3 100644
--- a/torch/_inductor/decomposition.py
+++ b/torch/_inductor/decomposition.py
@@ -544,7 +544,6 @@ def amax(
    keepdim: bool = False,
) -> torch.Tensor:
    if self.dtype == torch.bool:
-        # pyrefly: ignore # no-matching-overload
        return torch.any(self, dim=dim, keepdim=keepdim)
    return NotImplemented
@@ -556,7 +555,6 @@ def amin(
    keepdim: bool = False,
) -> torch.Tensor:
    if self.dtype == torch.bool:
-        # pyrefly: ignore # no-matching-overload
        return torch.all(self, dim=dim, keepdim=keepdim)
    return NotImplemented
diff --git a/torch/_inductor/fx_utils.py b/torch/_inductor/fx_utils.py
index 4c0a2ff35e1..ec3a1d83d92 100644
--- a/torch/_inductor/fx_utils.py
+++ b/torch/_inductor/fx_utils.py
@@ -238,7 +238,7 @@ class FakeTensorUpdater:
                symbol_to_path := compute_unbacked_bindings(shape_env, new_fake_tensor)
            ):
                # Refresh the bindings to the new symbols
-                # pyrefly: ignore # unbound-name
+
                node.meta["unbacked_bindings"] = symbol_to_path

            existing_storages[get_node_storage(node)] += 1
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index ef93b6dc6c0..7ec619731ad 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -6500,12 +6500,10 @@ def div_prim(a, b):
    # see https://github.com/pytorch/pytorch/issues/157959
    if (divisor := get_constant_value(b)) is not None and a.get_device().type != "cpu":
        # Replace divide by constant with multiply by reciprocal
-        # pyrefly: ignore # unbound-name
+
        if divisor.value == 0:
-            # pyrefly: ignore # unbound-name
            reciprocal = math.copysign(float("inf"), divisor.value)
        else:
-            # pyrefly: ignore # unbound-name
            reciprocal = 1.0 / divisor.value
        return mul(a, reciprocal)
diff --git a/torch/_inductor/package/package.py b/torch/_inductor/package/package.py
index 7c7884c92db..bd11d033cad 100644
--- a/torch/_inductor/package/package.py
+++ b/torch/_inductor/package/package.py
@@ -131,7 +131,6 @@ def load_package(
        )
        return AOTICompiledModel(loader)

-    # pyrefly: ignore # no-matching-overload
    path = os.fspath(path)  # AOTIModelPackageLoader expects (str, str)
    loader = torch._C._aoti.AOTIModelPackageLoader(
        path, model_name, run_single_threaded, num_runners, device_index
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 82ab51556fb..0d91c880ce3 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -2676,7 +2676,6 @@ class Scheduler:
            and (dep := next(iter(node.read_writes.writes)))
            and isinstance(dep, MemoryDep)
        ):
-            # pyrefly: ignore # unbound-name
            node_mode = dep.mode
        else:
            node_mode = None
@@ -4360,7 +4359,6 @@ class Scheduler:
        if config.expand_dimension_for_pointwise_nodes and (
            expand_analysis := self.get_expand_dim_for_pointwise_nodes(node1, node2)
        ):
-            # pyrefly: ignore # unbound-name
            (expand_dim, smaller_node, expand_size) = expand_analysis
            smaller_node.expand_dimension_for_pointwise_node(expand_dim, expand_size)
            shared_data_score = self.score_fusion_memory(node1, node2)
@@ -4669,7 +4667,6 @@ class Scheduler:
                device.type == "cuda"
                and (device_props := torch.cuda.get_device_properties(device)).major < 7
            ):
-                # pyrefly: ignore # unbound-name
                raise GPUTooOldForTriton(device_props, inspect.currentframe())
            elif is_gpu(device.type) and not device.type == "mps":
                raise TritonMissing(inspect.currentframe())
@@ -4967,7 +4964,6 @@ class Scheduler:
            if isinstance(buf.node, ir.MutationOutput) and (
                real_name := self.mutation_real_name.get(buf_name, None)
            ):
-                # pyrefly: ignore # unbound-name
                return is_none_layout(real_name)
            return True
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 9fe7f97372f..ee09993caee 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -3681,8 +3681,8 @@ class AlgorithmSelectorCache(PersistentCache):
                ),
                node.get_device(),
                node.get_dtype(),
-                # pyrefly: ignore # missing-attribute
                V.graph.sizevars.atomically_apply_size_hint(
+                    # pyrefly: ignore # missing-attribute
                    node.layout.offset,
                    fallback=config.unbacked_symint_fallback,
                    hint_override=hint_override,
diff --git a/torch/_inductor/template_heuristics/triton.py b/torch/_inductor/template_heuristics/triton.py
index c8c672c6911..850215a3871 100644
--- a/torch/_inductor/template_heuristics/triton.py
+++ b/torch/_inductor/template_heuristics/triton.py
@@ -1652,7 +1652,7 @@ class MMTemplateConfigMixin(GemmMaxAutotuneTemplateConfigHeuristics):
        )

        # Build options dict
-        # pyrefly: ignore # no-matching-overload
+
        options_dict = dict(
            EVEN_K=even_k_symbolic,
            USE_FAST_ACCUM=False,  # Option for _scaled_mm
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 26ce8c60520..b707fc791ef 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -3764,7 +3764,6 @@ def maybe_log_cudagraph_partition(
        and (fx_node := ir_node.get_origin_node())
        and (stack_trace := fx_node.meta.get("stack_trace", None))
    ):
-        # pyrefly: ignore # unbound-name
        warning_msg = f"{warning_msg}. Found from : \n {stack_trace}"
        perf_hint_log.warning(warning_msg)
diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py
index f8430064917..9a527471c8c 100644
--- a/torch/_inductor/wrapper_benchmark.py
+++ b/torch/_inductor/wrapper_benchmark.py
@@ -144,7 +144,6 @@ def benchmark_all_kernels(
            launcher = triton_kernel.launchers[0]
            print(
                get_info_str(
-                    # pyrefly: ignore # bad-argument-type
                    ms,
                    launcher.n_regs,
                    launcher.n_spills,
diff --git a/torch/_library/fake_profile.py b/torch/_library/fake_profile.py
index 9e0b8cccdb5..984a996b90d 100644
--- a/torch/_library/fake_profile.py
+++ b/torch/_library/fake_profile.py
@@ -246,7 +246,6 @@ def save_op_profiles(op_profiles: dict[str, set[OpProfile]], f: FileLike) -> None:
    yaml_str = generate_yaml_from_profiles(op_profiles)

    if isinstance(f, (str, os.PathLike)):
-        # pyrefly: ignore # no-matching-overload
        f = os.fspath(f)

    with open(f, "w") as file:
@@ -312,7 +311,6 @@ def load_op_profiles(f: FileLike) -> dict[str, set[OpProfile]]:
    Loads the saved operator profiles from `save_op_profiles`.
    """
    if isinstance(f, (str, os.PathLike)):
-        # pyrefly: ignore # no-matching-overload
        f = os.fspath(f)

    with open(f) as file:
diff --git a/torch/_library/opaque_object.py b/torch/_library/opaque_object.py
index cbe8795ec53..ce9b9cfe38a 100644
--- a/torch/_library/opaque_object.py
+++ b/torch/_library/opaque_object.py
@@ -173,7 +173,7 @@ def register_opaque_type(cls: Any, name: Optional[str] = None) -> None:
            f"Unable to accept name, {name}, for this opaque type as it contains a '.'"
        )
    _OPAQUE_TYPES[cls] = name
-    # pyrefly: ignore # missing-attribute
+
    torch._C._register_opaque_type(name)
@@ -183,5 +183,5 @@ def is_opaque_type(cls: Any) -> bool:
    """
    if cls not in _OPAQUE_TYPES:
        return False
-    # pyrefly: ignore # missing-attribute
+
    return torch._C._is_opaque_type_registered(_OPAQUE_TYPES[cls])
diff --git a/torch/_logging/_internal.py b/torch/_logging/_internal.py
index a8426861026..04298b7cdac 100644
--- a/torch/_logging/_internal.py
+++ b/torch/_logging/_internal.py
@@ -914,7 +914,6 @@ class TorchLogsFormatter(logging.Formatter):
            and (trace_id := torch._guards.CompileContext.current_trace_id())
            is not None
        ):
-            # pyrefly: ignore # unbound-name
            record.traceid = f" [{trace_id}]"

        glog_level_to_abbr = {
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index 822f949d536..9e70cdf9a9e 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -1336,9 +1336,9 @@ def float_power(
    # Float power has the following contiguous cast behavior to be
    # consistent with its C++ impl
-    # pyrefly: ignore # no-matching-overload
+
    a = _maybe_convert_to_dtype(a, dtype)
-    # pyrefly: ignore # no-matching-overload
+
    b = _maybe_convert_to_dtype(b, dtype)

    a, b = _maybe_broadcast(a, b)
@@ -2348,7 +2348,6 @@ def all(
    dim: Optional[DimsType] = None,
    keepdim: bool = False,
) -> TensorLikeType:
-    # pyrefly: ignore # no-matching-overload
    result = torch.logical_not(torch.any(torch.logical_not(a), dim, keepdim=keepdim))

    if a.dtype == torch.uint8:
@@ -3245,7 +3244,7 @@ def _normalize(
      mean (Tensor): mean of the tensor along norm_dims.
      rstd (Tensor): 1/std of the tensor along norm_dims.
""" - # pyrefly: ignore # no-matching-overload + norm_dims = utils.canonicalize_dims(a.ndim, norm_dims) computation_dtype = utils.get_computation_dtype(a.dtype) a_acc = _maybe_convert_to_dtype(a, computation_dtype) @@ -3975,7 +3974,7 @@ def reshape_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: @out_wrapper() def roll(a: TensorLikeType, shifts: DimsType, dims: DimsType = ()) -> TensorLikeType: """Reference implementation of :func:`torch.roll`.""" - # pyrefly: ignore # no-matching-overload + dims = utils.canonicalize_dims(a.ndim, dims) # ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1 if not isinstance(shifts, Iterable): @@ -4286,7 +4285,7 @@ def squeeze(a: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType return prims.squeeze(a, dims) if dims else prims.view_of(a) ndim = a.ndim - # pyrefly: ignore # no-matching-overload + dim = utils.canonicalize_dims(ndim, dim) dims = (dim,) if isinstance(dim, Dim) else dim # Short-circuits if the tensor has no dimensions diff --git a/torch/_refs/linalg/__init__.py b/torch/_refs/linalg/__init__.py index c6ed344f323..04949e2a7c0 100644 --- a/torch/_refs/linalg/__init__.py +++ b/torch/_refs/linalg/__init__.py @@ -216,7 +216,7 @@ def matrix_norm( # shape check_is_matrix(A, "linalg.matrix_norm") # dim - # pyrefly: ignore # no-matching-overload + dim = utils.canonicalize_dims(A.ndim, dim) if isinstance(dim, Dim): dim = (dim,) # type: ignore[assignment] diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py index a4dadc558f4..dbb297867be 100644 --- a/torch/_subclasses/fake_tensor.py +++ b/torch/_subclasses/fake_tensor.py @@ -2620,7 +2620,7 @@ class FakeTensorMode(TorchDispatchMode): and s.rhs == 1 ): assert self.shape_env is not None - # pyrefly: ignore # unbound-name + self.shape_env.set_unbacked_var_to_val(s, int(real_t)) if real_out is not nil: diff --git a/torch/_tensor.py b/torch/_tensor.py index c36ba126d64..23195f720c5 100644 --- a/torch/_tensor.py +++ b/torch/_tensor.py @@ -1110,7 +1110,6 @@ class Tensor(torch._C.TensorBase): @_handle_torch_function_and_wrap_type_error_to_not_implemented def __rsub__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor": - # pyrefly: ignore # no-matching-overload return _C._VariableFunctions.rsub(self, other) @_handle_torch_function_and_wrap_type_error_to_not_implemented @@ -1137,7 +1136,7 @@ class Tensor(torch._C.TensorBase): @_handle_torch_function_and_wrap_type_error_to_not_implemented def __rmod__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor": - return torch.remainder(other, self) # pyrefly: ignore # no-matching-overload + return torch.remainder(other, self) def __format__(self, format_spec): if has_torch_function_unary(self): @@ -1150,7 +1149,7 @@ class Tensor(torch._C.TensorBase): @_handle_torch_function_and_wrap_type_error_to_not_implemented def __rpow__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor": - return torch.pow(other, self) # pyrefly: ignore # no-matching-overload + return torch.pow(other, self) @_handle_torch_function_and_wrap_type_error_to_not_implemented def __floordiv__(self, other: Union["Tensor", int, float, bool]) -> "Tensor": # type: ignore[override] @@ -1166,14 +1165,12 @@ class Tensor(torch._C.TensorBase): def __rlshift__( self, other: Union["Tensor", int, float, bool, complex] ) -> "Tensor": - # pyrefly: ignore # no-matching-overload return torch.bitwise_left_shift(other, self) 
    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rrshift__(
        self, other: Union["Tensor", int, float, bool, complex]
    ) -> "Tensor":
-        # pyrefly: ignore # no-matching-overload
        return torch.bitwise_right_shift(other, self)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
diff --git a/torch/_utils.py b/torch/_utils.py
index 87d17c374de..095f256aac3 100644
--- a/torch/_utils.py
+++ b/torch/_utils.py
@@ -744,10 +744,7 @@ class ExceptionWrapper:
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
-        self.exc_msg = "".join(
-            # pyrefly: ignore # no-matching-overload
-            traceback.format_exception(*exc_info)
-        )
+        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py
index 37bb1837871..10c0bf23f85 100644
--- a/torch/_utils_internal.py
+++ b/torch/_utils_internal.py
@@ -89,7 +89,6 @@ def compile_time_strobelight_meta(
            skip := kwargs["skip"],
            int,
        ):
-            # pyrefly: ignore # unbound-name
            kwargs["skip"] = skip + 1

        # This is not needed but we have it here to avoid having profile_compile_time
diff --git a/torch/ao/nn/quantized/reference/modules/conv.py b/torch/ao/nn/quantized/reference/modules/conv.py
index 1e9cbceb7c1..aa9cf1aecda 100644
--- a/torch/ao/nn/quantized/reference/modules/conv.py
+++ b/torch/ao/nn/quantized/reference/modules/conv.py
@@ -95,7 +95,7 @@ class Conv1d(_ConvNd, nn.Conv1d):
        and the backend should be able to fuse the ops with `*` into a quantized conv1d
        """
        weight_quant_dequant = self.get_weight()
-        # pyrefly: ignore # no-matching-overload
+
        result = F.conv1d(
            x,
            weight_quant_dequant,
@@ -160,7 +160,7 @@ class Conv2d(_ConvNd, nn.Conv2d):
        and the backend should be able to fuse the ops with `*` into a quantized conv2d
        """
        weight_quant_dequant = self.get_weight()
-        # pyrefly: ignore # no-matching-overload
+
        result = F.conv2d(
            x,
            weight_quant_dequant,
@@ -225,7 +225,7 @@ class Conv3d(_ConvNd, nn.Conv3d):
        and the backend should be able to fuse the ops with `*` into a quantized conv3d
        """
        weight_quant_dequant = self.get_weight()
-        # pyrefly: ignore # no-matching-overload
+
        result = F.conv3d(
            x,
            weight_quant_dequant,
diff --git a/torch/ao/ns/fx/graph_passes.py b/torch/ao/ns/fx/graph_passes.py
index 7e1062257c8..79123428c03 100644
--- a/torch/ao/ns/fx/graph_passes.py
+++ b/torch/ao/ns/fx/graph_passes.py
@@ -1095,6 +1095,7 @@ def create_a_shadows_b(
                            # pyrefly: ignore # unbound-name
                            if not isinstance(input_logger, list):
                                raise AssertionError(
+                                    # pyrefly: ignore # unbound-name
                                    f"Expected list, got {type(input_logger)}"
                                )
                            # pyrefly: ignore # unbound-name
diff --git a/torch/ao/quantization/experimental/adaround_optimization.py b/torch/ao/quantization/experimental/adaround_optimization.py
index 31ce16537e4..1b34c8cbfdb 100644
--- a/torch/ao/quantization/experimental/adaround_optimization.py
+++ b/torch/ao/quantization/experimental/adaround_optimization.py
@@ -127,7 +127,6 @@ class AdaptiveRoundingOptimizer:
    @torch.no_grad()
    def feed_forward(self, x, weight, module):
        if isinstance(module, torch.nn.Conv1d):
-            # pyrefly: ignore # no-matching-overload
            out = torch.nn.functional.conv1d(
                x,
                weight,
diff --git a/torch/ao/quantization/pt2e/utils.py b/torch/ao/quantization/pt2e/utils.py
index 4f994f710a1..320429a5677 100644
--- a/torch/ao/quantization/pt2e/utils.py
+++ b/torch/ao/quantization/pt2e/utils.py
@@ -90,7 +90,6 @@ def _find_q_dq_node_for_user(
            and arg.op == "call_function"
            and arg.target in _QUANTIZE_OPS
        ):
-            # pyrefly: ignore # unbound-name
            q_node = arg

    return (q_node, dq_node)
diff --git a/torch/autograd/__init__.py b/torch/autograd/__init__.py
index c0a8d30df32..968360a6d1c 100644
--- a/torch/autograd/__init__.py
+++ b/torch/autograd/__init__.py
@@ -92,7 +92,7 @@ def _make_grads(
    is_grads_batched: bool,
) -> tuple[_OptionalTensor, ...]:
    new_grads: list[_OptionalTensor] = []
-    # pyrefly: ignore # no-matching-overload
+
    for out, grad in zip(outputs, grads):
        out = cast(Union[torch.Tensor, graph.GradientEdge], out)
        out_size = None
diff --git a/torch/backends/cuda/__init__.py b/torch/backends/cuda/__init__.py
index d895ab377e7..d62c2b05a1e 100644
--- a/torch/backends/cuda/__init__.py
+++ b/torch/backends/cuda/__init__.py
@@ -155,25 +155,21 @@ class cuBLASModule:
        if name == "allow_tf32":
            return torch._C._get_cublas_allow_tf32()
        elif name == "allow_fp16_reduced_precision_reduction":
-            # pyrefly: ignore # not-iterable
            allow_reduced_precision, _ = (
                torch._C._get_cublas_allow_fp16_reduced_precision_reduction()
            )
            return allow_reduced_precision
        elif name == "allow_fp16_reduced_precision_reduction_split_k":
-            # pyrefly: ignore # not-iterable
            _, allow_splitk = (
                torch._C._get_cublas_allow_fp16_reduced_precision_reduction()
            )
            return allow_splitk
        elif name == "allow_bf16_reduced_precision_reduction":
-            # pyrefly: ignore # not-iterable
            allow_reduced_precision, _ = (
                torch._C._get_cublas_allow_bf16_reduced_precision_reduction()
            )
            return allow_reduced_precision
        elif name == "allow_bf16_reduced_precision_reduction_split_k":
-            # pyrefly: ignore # not-iterable
            _, allow_splitk = (
                torch._C._get_cublas_allow_bf16_reduced_precision_reduction()
            )
            return allow_splitk
@@ -193,7 +189,6 @@ class cuBLASModule:
            )
            return torch._C._set_cublas_allow_fp16_reduced_precision_reduction(
                allow_reduced_precision,
-                # pyrefly: ignore # bad-argument-count
                allow_splitk,
            )
        elif name == "allow_bf16_reduced_precision_reduction":
@@ -202,7 +197,6 @@ class cuBLASModule:
            )
            return torch._C._set_cublas_allow_bf16_reduced_precision_reduction(
                allow_reduced_precision,
-                # pyrefly: ignore # bad-argument-count
                allow_splitk,
            )
        elif name == "allow_fp16_accumulation":
diff --git a/torch/cuda/green_contexts.py b/torch/cuda/green_contexts.py
index 078cd06e19c..33f74a5c2b8 100644
--- a/torch/cuda/green_contexts.py
+++ b/torch/cuda/green_contexts.py
@@ -10,6 +10,7 @@
if hasattr(torch._C, "_CUDAGreenContext"):


# Python shim helps Sphinx process docstrings more reliably.
+# pyrefly: ignore # invalid-inheritance
class GreenContext(_GreenContext):
    r"""Wrapper around a CUDA green context.
diff --git a/torch/distributed/checkpoint/logger.py b/torch/distributed/checkpoint/logger.py
index f5373da83b6..68ad0009c44 100644
--- a/torch/distributed/checkpoint/logger.py
+++ b/torch/distributed/checkpoint/logger.py
@@ -37,7 +37,6 @@ def _msg_dict_from_dcp_method_args(*args, **kwargs) -> dict[str, Any]:
    checkpoint_id = kwargs.get("checkpoint_id")

    if not checkpoint_id and (serializer := storage_writer or storage_reader):
-        # pyrefly: ignore # unbound-name
        checkpoint_id = getattr(serializer, "checkpoint_id", None)

    msg_dict["checkpoint_id"] = (
diff --git a/torch/distributed/checkpoint/state_dict.py b/torch/distributed/checkpoint/state_dict.py
index d401db7a846..f023eb949ce 100644
--- a/torch/distributed/checkpoint/state_dict.py
+++ b/torch/distributed/checkpoint/state_dict.py
@@ -1227,7 +1227,6 @@ def _unflatten_model_state_dict(
    if not state_dict:
        return {}

-    # pyrefly: ignore # no-matching-overload
    if isinstance(next(iter(state_dict.keys())), nn.Module):
        warnings.warn(
            "Passing model_state_dict as a ``Dict[nn.Module, Dict[str, Any]]``"
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index c3984717651..96b2eeb7ef2 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -393,7 +393,7 @@ class BackendConfig:
                # e.g. "nccl", "gloo", "ucc", "mpi"
                supported_devices = Backend.backend_capability[backend.lower()]
                backend_val = Backend(backend)
-                # pyrefly: ignore # bad-assignment
+
                self.device_backend_map = dict.fromkeys(supported_devices, backend_val)
        elif ":" in backend.lower():
            # Backend specified in "device:backend" format
diff --git a/torch/distributed/pipelining/microbatch.py b/torch/distributed/pipelining/microbatch.py
index 06c4edb9b3d..713f8e1be43 100644
--- a/torch/distributed/pipelining/microbatch.py
+++ b/torch/distributed/pipelining/microbatch.py
@@ -290,7 +290,6 @@ def _shard_dict_of_args(
                    f"Unsupported chunk spec: {spec} and value: {v} combination."
                )

-        # pyrefly: ignore # no-matching-overload
        for _flat_split_result, _v_split in zip(
            flat_split_results, v_splits, strict=True
        ):
diff --git a/torch/distributed/tensor/experimental/_attention.py b/torch/distributed/tensor/experimental/_attention.py
index a9e9104eb44..5939a247c2f 100644
--- a/torch/distributed/tensor/experimental/_attention.py
+++ b/torch/distributed/tensor/experimental/_attention.py
@@ -1327,7 +1327,6 @@ class _ContextParallel(ParallelStyle):
            placement = [Shard(self.seq_dim)]

        all_args = []
-        # pyrefly: ignore # bad-assignment, bad-argument-type
        for arg in itertools.chain(args, kwargs.values()):
            if isinstance(arg, torch.Tensor):
                if isinstance(arg, DTensor):
diff --git a/torch/distributed/tensor/parallel/style.py b/torch/distributed/tensor/parallel/style.py
index 3625f36fefa..032179bafa3 100644
--- a/torch/distributed/tensor/parallel/style.py
+++ b/torch/distributed/tensor/parallel/style.py
@@ -548,7 +548,7 @@ class PrepareModuleInput(ParallelStyle):
            assert self.desired_input_layouts is not None, (
                "desired module inputs should not be None!"
            )
-            # pyrefly: ignore # no-matching-overload
+
            for inp, input_layout, desired_layout in zip(
                inputs, self.input_layouts, self.desired_input_layouts
            ):
@@ -664,7 +664,7 @@ class PrepareModuleOutput(ParallelStyle):
                raise ValueError(
                    "module outputs and output_layouts should have same length!"
                )
-            # pyrefly: ignore # no-matching-overload
+
            for out, out_layout, desired_out_layout in zip(
                outputs, self.output_layouts, self.desired_output_layouts
            ):
diff --git a/torch/distributed/utils.py b/torch/distributed/utils.py
index 1dc123b50db..aae098056bb 100644
--- a/torch/distributed/utils.py
+++ b/torch/distributed/utils.py
@@ -59,7 +59,7 @@ def _cast_forward_inputs(
    def cast_fn(x: torch.Tensor) -> torch.Tensor:
        if not torch.is_floating_point(x) or x.dtype == dtype:
            return x
-        # pyrefly: ignore # no-matching-overload
+
        return x.to(dtype)

    return (_apply_to_tensors(cast_fn, args), _apply_to_tensors(cast_fn, kwargs))
diff --git a/torch/export/__init__.py b/torch/export/__init__.py
index 83b6b87fe4d..a76cda0682c 100644
--- a/torch/export/__init__.py
+++ b/torch/export/__init__.py
@@ -436,7 +436,6 @@ def load(
        print(ep(torch.randn(5)))
    """
    if isinstance(f, (str, os.PathLike)):
-        # pyrefly: ignore # no-matching-overload
        f = os.fspath(f)

    extra_files = extra_files or {}
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 779b7cfe496..a2c47ca3e5d 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -514,7 +514,6 @@ def _replace_unbacked_bindings(gm: torch.fx.GraphModule) -> None:
                simplify=True,
            )
        ):
-            # pyrefly: ignore # unbound-name
            node.meta["unbacked_bindings"] = unbacked_bindings
diff --git a/torch/export/pt2_archive/_package.py b/torch/export/pt2_archive/_package.py
index 1a2e74b84e3..7f96ab75be0 100644
--- a/torch/export/pt2_archive/_package.py
+++ b/torch/export/pt2_archive/_package.py
@@ -683,7 +683,6 @@ def package_pt2(
    if not (
        (isinstance(f, (io.IOBase, IO)) and f.writable() and f.seekable())
-        # pyrefly: ignore # no-matching-overload
        or (isinstance(f, (str, os.PathLike)) and os.fspath(f).endswith(".pt2"))
        or (isinstance(f, tempfile._TemporaryFileWrapper) and f.name.endswith(".pt2"))
    ):
@@ -695,7 +694,6 @@ def package_pt2(
    )

    if isinstance(f, (str, os.PathLike)):
-        # pyrefly: ignore # no-matching-overload
        f = os.fspath(f)

    # pyrefly: ignore # bad-argument-type
@@ -1086,7 +1084,6 @@ def load_pt2(
    if not (
        (isinstance(f, (io.IOBase, IO)) and f.readable() and f.seekable())
-        # pyrefly: ignore # no-matching-overload
        or (isinstance(f, (str, os.PathLike)) and os.fspath(f).endswith(".pt2"))
    ):
        # TODO: turn this into an error in 2.9
@@ -1097,7 +1094,6 @@ def load_pt2(
    )

    if isinstance(f, (str, os.PathLike)):
-        # pyrefly: ignore # no-matching-overload
        f = os.fspath(f)

    weights = {}
@@ -1167,7 +1163,6 @@ def load_pt2(
    else:
        aoti_runners = {
            model_name: _load_aoti(
-                # pyrefly: ignore # bad-argument-type
                f,
                model_name,
                run_single_threaded,
diff --git a/torch/fx/experimental/proxy_tensor.py b/torch/fx/experimental/proxy_tensor.py
index ab3a754ba5d..3d3dd1cb22c 100644
--- a/torch/fx/experimental/proxy_tensor.py
+++ b/torch/fx/experimental/proxy_tensor.py
@@ -916,7 +916,6 @@ def fetch_object_proxy(
def fetch_object_proxy(
    tracer: _ProxyTracer, t: Union[Tensor, _AnyScriptObjectType, PySymType]
) -> object:
-    # pyrefly: ignore # no-matching-overload
    return get_proxy_slot(t, tracer, t)
@@ -965,7 +964,6 @@ def _fetch_proxies_and_all_constant_flag(
    """
    f_flat_args_kwargs = [
        (
-            # pyrefly: ignore # no-matching-overload
            fetch_object_proxy(tracer, x)
            if isinstance(x, (Tensor, _AnyScriptObject))
            else x
@@ -2497,7 +2495,6 @@ class _MakefxTracer:
            ):
                from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts

-                # pyrefly: ignore # unbound-name
                insert_deferred_runtime_asserts(t, fake_mode.shape_env, "reenter_make_fx")
                t.recompile()
        # TODO: kind of a bad way to do it, should maybe figure out a better way
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 67f8c0f6657..7ece1b47942 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -621,13 +621,12 @@ def rebind_unbacked(
    ):
        # This is what the pattern match above is testing
        repacked = _sympy_cast_symbool_to_symint_guardless(
-            # pyrefly: ignore # unbound-name
            sympy.Eq(new_raw_u1, 1)
        )
        assert repacked == raw_u1, f"{repacked} != {raw_u1}"
        # Cancel the to_int(to_bool(x)). This is sound because x in
        # [0, 1]
-        # pyrefly: ignore # unbound-name
+
        raw_u1 = new_raw_u1

    if not isinstance(raw_u1, sympy.Symbol):
@@ -1055,7 +1054,6 @@ def find_symbol_binding_fx_nodes(
    # NB: Prefer first occurrence of symbol
    for node in graph.nodes:
        if (s := is_symbol_binding_fx_node(node)) is not None and s not in r:
-            # pyrefly: ignore # unbound-name
            r[s] = node
    return r
@@ -1226,13 +1224,12 @@ def _free_unbacked_symbols_with_path(
        and isinstance(s := expr(a), sympy.Symbol)
        and s in pending
    ):
-        # pyrefly: ignore # unbound-name
        r[s] = path
        if shape_env and real is not None:
            assert isinstance(real, (int, float))
-            # pyrefly: ignore # unbound-name
+
            shape_env.set_unbacked_var_to_val(s, real)
-        # pyrefly: ignore # unbound-name
+
        pending.remove(s)
    # When an unbacked SymInt is perfectly divisible by an integer
    # constant, we replace it with the integer constant to improve
@@ -1262,14 +1259,10 @@ def _free_unbacked_symbols_with_path(
                source=shape_env.var_to_sources.get(s, [None])[0],  # type: ignore[union-attr]
            )

-        # pyrefly: ignore # unbound-name
        unbacked = lhs if lhs in pending else rhs
        divisor: IntLikeType = (
-            # pyrefly: ignore # unbound-name
            int(coeff)
-            # pyrefly: ignore # unbound-name
            if shape_env and isinstance(coeff, sympy.Integer)
-            # pyrefly: ignore # unbound-name
            else _symint_wrap(coeff)
        )
        # TODO: DivideByKey needs to test divisibility at runtime!
@@ -1278,11 +1271,8 @@ def _free_unbacked_symbols_with_path(
        if real is not None:
            assert isinstance(real, int)
            val = (
-                # pyrefly: ignore # unbound-name
                real // int(coeff)
-                # pyrefly: ignore # unbound-name
                if isinstance(coeff, sympy.Integer)
-                # pyrefly: ignore # unbound-name
                else CleanDiv(real, coeff)
            )
            if shape_env:
@@ -1299,14 +1289,12 @@ def _free_unbacked_symbols_with_path(
        and s.rhs == 1
        and s.lhs in pending
    ):
-        # pyrefly: ignore # unsupported-operation
        r[s.lhs] = path + (ConvertIntKey(),)
        if real is not None:
            assert type(real) is bool
            if shape_env:
-                # pyrefly: ignore # unbound-name
                shape_env.set_unbacked_var_to_val(s, int(real))
-        # pyrefly: ignore # unbound-name
+
        pending.remove(s.lhs)

    return r
@@ -1382,7 +1370,6 @@ def compute_unbacked_bindings(
    ):
        if (
            isinstance(old_sym, SymTypes)
-            # pyrefly: ignore # unbound-name
            and (old_s := old_sym.node.expr) != new_s
        ):
            # If old_s is not an unbacked_symbol,
@@ -1392,15 +1379,12 @@ def compute_unbacked_bindings(
            # and the original symbol gets replaced by the backed symbol.
            # When this happens we just replace new_s by the old_s
            # because we know the value is the same.
-            # pyrefly: ignore # unbound-name
+
            if isinstance(old_s, sympy.Symbol) and free_unbacked_symbols(old_s):
-                # pyrefly: ignore # unbound-name
                shape_env._rename_unbacked_to(new_s, old_s)
            else:
-                # pyrefly: ignore # unbound-name
                shape_env._eliminate_unbacked(new_s, old_s)
        elif not isinstance(old_sym, SymTypes):
-            # pyrefly: ignore # unbound-name
            shape_env._eliminate_unbacked(new_s, sympy.sympify(old_sym))

    return symbol_to_path
@@ -3365,7 +3349,7 @@ class DimConstraints:
                and str(symbol := next(iter(c["eq"].free_symbols))) == old_root
            ):  # derived dim with root = old_root
                new_root_expr = results[str(old_root)]["eq"]  # dx=3*_dx+1
-                # pyrefly: ignore # unbound-name
+
                new_expr = c["eq"].subs({symbol: new_root_expr})  # dy=(3*_dx+1)+1
                c["eq"] = new_expr
@@ -7630,10 +7614,9 @@ class ShapeEnv:
                    log.info(
                        "oblivious_size %s -> %s (passed counterfactual)",
                        orig_expr,
-                        # pyrefly: ignore # unbound-name
                        correct_hint,
                    )
-                    # pyrefly: ignore # unbound-name
+
                    concrete_val = correct_hint
                    # NB: do NOT transmute into runtime assert
                    ok = True
@@ -7650,10 +7633,9 @@ class ShapeEnv:
                        ).xreplace(self.var_to_val)
                    ).free_symbols
                ):
-                    # pyrefly: ignore # unbound-name
                    self._log_real_tensor_propagation(orig_expr, unsound_result)
                    transmute_into_runtime_assert = True
-                    # pyrefly: ignore # unbound-name
+
                    concrete_val = unsound_result
                    ok = True
diff --git a/torch/fx/graph.py b/torch/fx/graph.py
index 940737e7e3a..b9ed7370381 100644
--- a/torch/fx/graph.py
+++ b/torch/fx/graph.py
@@ -1314,6 +1314,7 @@ class Graph:
                f(to_erase)

        self._find_nodes_lookup_table.remove(to_erase)
+        # pyrefly: ignore # missing-attribute
        to_erase._remove_from_list()
        to_erase._erased = True  # iterators may retain handles to erased nodes
        self._len -= 1
diff --git a/torch/fx/node.py b/torch/fx/node.py
index 466c704fb92..ad848c80970 100644
--- a/torch/fx/node.py
+++ b/torch/fx/node.py
@@ -385,6 +385,7 @@ class Node(_NodeBase):
        Args:
            x (Node): The node to put before this node. Must be a member of the same graph.
        """
+        # pyrefly: ignore # missing-attribute
        self._prepend(x)

    @compatibility(is_backward_compatible=True)
@@ -396,6 +397,7 @@ class Node(_NodeBase):
        Args:
            x (Node): The node to put after this node. Must be a member of the same graph.
""" + # pyrefly: ignore # missing-attribute self._next._prepend(x) @property diff --git a/torch/fx/passes/_tensorify_python_scalars.py b/torch/fx/passes/_tensorify_python_scalars.py index 41a9e371344..bf6a2d99c40 100644 --- a/torch/fx/passes/_tensorify_python_scalars.py +++ b/torch/fx/passes/_tensorify_python_scalars.py @@ -276,7 +276,6 @@ def tensorify_python_scalars( ): transform = True try: - # pyrefly: ignore # unbound-name proxy = _sympy_interp(zf.node.expr) except NotImplementedError: transform = False @@ -303,7 +302,6 @@ def tensorify_python_scalars( args.append(a) if transform: - # pyrefly: ignore # unbound-name replacement_proxy = replacement_op(*args) # pyrefly: ignore # missing-attribute diff --git a/torch/fx/passes/fake_tensor_prop.py b/torch/fx/passes/fake_tensor_prop.py index 48b35f5183b..43dbe86c737 100644 --- a/torch/fx/passes/fake_tensor_prop.py +++ b/torch/fx/passes/fake_tensor_prop.py @@ -93,7 +93,6 @@ class FakeTensorProp(torch.fx.Interpreter): if (shape_env := self._mode.shape_env) and ( symbol_to_path := compute_unbacked_bindings(shape_env, result) ): - # pyrefly: ignore # unbound-name n.meta["unbacked_bindings"] = symbol_to_path return result diff --git a/torch/fx/passes/runtime_assert.py b/torch/fx/passes/runtime_assert.py index 46fd2afa229..46298304adb 100644 --- a/torch/fx/passes/runtime_assert.py +++ b/torch/fx/passes/runtime_assert.py @@ -298,14 +298,12 @@ def insert_deferred_runtime_asserts( and s not in expr_to_proxy ): with _set_node_metadata_hook(gm, _node_metadata_hook): - # pyrefly: ignore # unbound-name expr_to_proxy[s] = fx.Proxy(cb(), tracer=tracer) - # pyrefly: ignore # unbound-name + log.debug("expr_to_proxy[%s] = %s", s, expr_to_proxy[s]) - # pyrefly: ignore # unbound-name match_symbol(example_value, lambda: node) - # pyrefly: ignore # unbound-name + if isinstance(t := example_value, torch.Tensor): for i, s in enumerate(t.size()): match_symbol( @@ -386,7 +384,6 @@ def insert_deferred_runtime_asserts( # maybe re-reify expression, replace current node if ( - # pyrefly: ignore # unbound-name sym_expr in expr_to_proxy or ( # example value is redundant _is_intermediate_tensor_sym_call(node) @@ -405,10 +402,8 @@ def insert_deferred_runtime_asserts( nn_module_stack=node.meta.get("nn_module_stack"), ), ): - # pyrefly: ignore # unbound-name expr_to_proxy[sym_expr] = _sympy_interp( expr_to_proxy, - # pyrefly: ignore # unbound-name sym_expr, ) # type: ignore[arg-type] # won't try DCE-ing tensor compute here @@ -419,14 +414,12 @@ def insert_deferred_runtime_asserts( "CSE node %s -> %s for expr %s", node, hash_node, - # pyrefly: ignore # unbound-name sym_expr, ) # store node in hash cons, don't delete/replace - # pyrefly: ignore # unbound-name + elif sym_expr not in expr_to_proxy and not isinstance( - # pyrefly: ignore # unbound-name sym_expr, (sympy.Number, sympy.logic.boolalg.BooleanAtom), ): # don't hash cons primitives diff --git a/torch/fx/passes/split_module.py b/torch/fx/passes/split_module.py index 1518c6edc88..fdbec419041 100644 --- a/torch/fx/passes/split_module.py +++ b/torch/fx/passes/split_module.py @@ -318,7 +318,6 @@ def split_module( and isinstance(s0 := val.node.expr, sympy.Symbol) and s0 not in symbol_to_node ): - # pyrefly: ignore # unbound-name symbol_to_node[val.node.expr] = node if node.op in ["placeholder", "get_attr", "output"]: diff --git a/torch/fx/passes/utils/source_matcher_utils.py b/torch/fx/passes/utils/source_matcher_utils.py index d504ce56fd6..043c65e6b77 100644 --- a/torch/fx/passes/utils/source_matcher_utils.py +++ 
b/torch/fx/passes/utils/source_matcher_utils.py @@ -85,7 +85,6 @@ def get_source_partitions( if (source_fn_st := node.meta.get("source_fn_stack", None)) is None and ( torch_fn := node.meta.get("torch_fn", None) ) is not None: - # pyrefly: ignore # unbound-name node_fqn, source_fn = torch_fn source_fn_name = source_fn.split(".")[1] if source_fn_name in wanted_sources: diff --git a/torch/hub.py b/torch/hub.py index d3328d1abe6..84740905ecc 100644 --- a/torch/hub.py +++ b/torch/hub.py @@ -421,7 +421,7 @@ def set_dir(d: Union[str, os.PathLike]) -> None: d (str): path to a local folder to save downloaded models & weights. """ global _hub_dir - _hub_dir = os.path.expanduser(d) # pyrefly: ignore # no-matching-overload + _hub_dir = os.path.expanduser(d) def list( diff --git a/torch/jit/_serialization.py b/torch/jit/_serialization.py index c719a01708c..0641aefd785 100644 --- a/torch/jit/_serialization.py +++ b/torch/jit/_serialization.py @@ -167,7 +167,6 @@ def load(f, map_location=None, _extra_files=None, _restore_shapes=False): if isinstance(f, (str, os.PathLike)): cpp_module = torch._C.import_ir_module( cu, - # pyrefly: ignore # no-matching-overload os.fspath(f), map_location, _extra_files, @@ -208,7 +207,6 @@ def validate_map_location(map_location=None): def jit_module_from_flatbuffer(f): if isinstance(f, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload f = os.fspath(f) return wrap_cpp_module(torch._C._load_jit_module_from_file(f)) else: @@ -258,7 +256,6 @@ def save_jit_module_to_flatbuffer(m, f, _extra_files=None): extra_files = {} if isinstance(f, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload f = os.fspath(f) torch._C._save_jit_module(m._c, f, extra_files) else: diff --git a/torch/jit/mobile/__init__.py b/torch/jit/mobile/__init__.py index d5a3c525659..9623ace3137 100644 --- a/torch/jit/mobile/__init__.py +++ b/torch/jit/mobile/__init__.py @@ -44,7 +44,6 @@ def _load_for_lite_interpreter(f, map_location=None): map_location = validate_map_location(map_location) if isinstance(f, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload cpp_module = torch._C._load_for_lite_interpreter(os.fspath(f), map_location) else: cpp_module = torch._C._load_for_lite_interpreter_from_buffer( @@ -106,7 +105,6 @@ def _get_model_bytecode_version(f_input) -> int: raise ValueError(f"The provided filename {f_input} is a directory") if isinstance(f_input, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload return torch._C._get_model_bytecode_version(os.fspath(f_input)) else: # pyrefly: ignore # missing-attribute @@ -140,7 +138,6 @@ def _get_mobile_model_contained_types(f_input) -> int: raise ValueError(f"The provided filename {f_input} is a directory") if isinstance(f_input, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload return torch._C._get_mobile_model_contained_types(os.fspath(f_input)) else: # pyrefly: ignore # missing-attribute @@ -168,9 +165,7 @@ def _backport_for_mobile(f_input, f_output, to_version): isinstance(f_output, (str, os.PathLike)) ): return torch._C._backport_for_mobile( - # pyrefly: ignore # no-matching-overload os.fspath(f_input), - # pyrefly: ignore # no-matching-overload os.fspath(f_output), to_version, ) @@ -198,7 +193,6 @@ def _backport_for_mobile_to_buffer(f_input, to_version): raise ValueError(f"The provided filename {f_input} is a directory") if isinstance(f_input, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload return torch._C._backport_for_mobile_to_buffer(os.fspath(f_input), to_version) else: return 
torch._C._backport_for_mobile_from_buffer_to_buffer( @@ -244,7 +238,6 @@ def _get_model_ops_and_info(f_input): raise ValueError(f"The provided filename {f_input} is a directory") if isinstance(f_input, (str, os.PathLike)): - # pyrefly: ignore # no-matching-overload return torch._C._get_model_ops_and_info(os.fspath(f_input)) else: # pyrefly: ignore # missing-attribute diff --git a/torch/library.py b/torch/library.py index d962c08c390..0490e68b5d1 100644 --- a/torch/library.py +++ b/torch/library.py @@ -644,7 +644,7 @@ def impl( >>> y2 = torch.sin(x) + 1 >>> assert torch.allclose(y1, y2) """ - # pyrefly: ignore # no-matching-overload + return _impl(qualname, types, func, lib=lib, disable_dynamo=False) @@ -831,7 +831,6 @@ def register_kernel( if device_types is None: device_types = "CompositeExplicitAutograd" - # pyrefly: ignore # no-matching-overload return _impl(op, device_types, func, lib=lib, disable_dynamo=True) diff --git a/torch/masked/_ops.py b/torch/masked/_ops.py index 382a493782c..fd7d19b0284 100644 --- a/torch/masked/_ops.py +++ b/torch/masked/_ops.py @@ -642,7 +642,6 @@ def _sparse_coo_scatter_reduction_helper( # promote dtype if specified if values.dtype != output_dtype: - # pyrefly: ignore # no-matching-overload values = values.to(output_dtype) if keepdim: @@ -767,7 +766,6 @@ def _sparse_csr_segment_reduction_helper( # promote dtype if specified if values.dtype != output_dtype: - # pyrefly: ignore # no-matching-overload values = values.to(output_dtype) if len(dims) == 0: diff --git a/torch/nn/modules/container.py b/torch/nn/modules/container.py index 373b6743c5b..711b8d5c190 100644 --- a/torch/nn/modules/container.py +++ b/torch/nn/modules/container.py @@ -473,7 +473,6 @@ class ModuleList(Module): return self def pop(self, key: Union[int, slice]) -> Module: - # pyrefly: ignore # index-error v = self[key] del self[key] return v diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py index 35ae57bcbcd..d8af4862697 100644 --- a/torch/nn/modules/conv.py +++ b/torch/nn/modules/conv.py @@ -363,7 +363,7 @@ class Conv1d(_ConvNd): self.dilation, self.groups, ) - # pyrefly: ignore # no-matching-overload + return F.conv1d( input, weight, bias, self.stride, self.padding, self.dilation, self.groups ) @@ -541,7 +541,7 @@ class Conv2d(_ConvNd): self.dilation, self.groups, ) - # pyrefly: ignore # no-matching-overload + return F.conv2d( input, weight, bias, self.stride, self.padding, self.dilation, self.groups ) @@ -711,7 +711,7 @@ class Conv3d(_ConvNd): self.dilation, self.groups, ) - # pyrefly: ignore # no-matching-overload + return F.conv3d( input, weight, bias, self.stride, self.padding, self.dilation, self.groups ) diff --git a/torch/onnx/_internal/torchscript_exporter/symbolic_helper.py b/torch/onnx/_internal/torchscript_exporter/symbolic_helper.py index 3f92f6418c8..59cd0eb0f89 100644 --- a/torch/onnx/_internal/torchscript_exporter/symbolic_helper.py +++ b/torch/onnx/_internal/torchscript_exporter/symbolic_helper.py @@ -364,7 +364,6 @@ def parse_args( fn_name = None args = [ _parse_arg(arg, arg_desc, arg_name, fn_name) # type: ignore[method-assign] - # pyrefly: ignore # no-matching-overload for arg, arg_desc, arg_name in zip(args, arg_descriptors, arg_names) ] # only support _outputs in kwargs diff --git a/torch/optim/adam.py b/torch/optim/adam.py index 77a00734007..b10dadd9e50 100644 --- a/torch/optim/adam.py +++ b/torch/optim/adam.py @@ -453,7 +453,7 @@ def _single_tensor_adam( device_beta1 = beta1 # Decay the first and second moment running average coefficient - # pyrefly: 
ignore # no-matching-overload + exp_avg.lerp_(grad, 1 - device_beta1) # Nested if is necessary to bypass jitscript rules diff --git a/torch/optim/optimizer.py b/torch/optim/optimizer.py index 66d794a3a40..c542dbfd84f 100644 --- a/torch/optim/optimizer.py +++ b/torch/optim/optimizer.py @@ -398,7 +398,6 @@ class Optimizer: self.state: defaultdict[torch.Tensor, Any] = defaultdict(dict) self.param_groups: list[dict[str, Any]] = [] - # pyrefly: ignore # no-matching-overload param_groups = list(params) if len(param_groups) == 0: raise ValueError("optimizer got an empty parameter list") diff --git a/torch/package/package_exporter.py b/torch/package/package_exporter.py index 7b686f00820..50e9cbe92fb 100644 --- a/torch/package/package_exporter.py +++ b/torch/package/package_exporter.py @@ -219,7 +219,7 @@ class PackageExporter: torch._C._log_api_usage_once("torch.package.PackageExporter") self.debug = debug if isinstance(f, (str, os.PathLike)): - f = os.fspath(f) # pyrefly: ignore # no-matching-overload + f = os.fspath(f) self.buffer: Optional[IO[bytes]] = None else: # is a byte buffer self.buffer = f diff --git a/torch/package/package_importer.py b/torch/package/package_importer.py index 8f2a009f912..10bf8981e28 100644 --- a/torch/package/package_importer.py +++ b/torch/package/package_importer.py @@ -108,7 +108,6 @@ class PackageImporter(Importer): self.filename = "" self.zip_reader = file_or_buffer elif isinstance(file_or_buffer, (os.PathLike, str)): - # pyrefly: ignore # no-matching-overload self.filename = os.fspath(file_or_buffer) if not os.path.isdir(self.filename): self.zip_reader = torch._C.PyTorchFileReader(self.filename) diff --git a/torch/serialization.py b/torch/serialization.py index dcdbf0c3cef..1ef46d63ca2 100644 --- a/torch/serialization.py +++ b/torch/serialization.py @@ -774,10 +774,7 @@ def _open_file_like(name_or_buffer: FileLike, mode: str) -> _opener[IO[bytes]]: class _open_zipfile_reader(_opener[torch._C.PyTorchFileReader]): def __init__(self, name_or_buffer: Union[str, IO[bytes]]) -> None: - super().__init__( - # pyrefly: ignore # no-matching-overload - torch._C.PyTorchFileReader(name_or_buffer) - ) + super().__init__(torch._C.PyTorchFileReader(name_or_buffer)) class _open_zipfile_writer_file(_opener[torch._C.PyTorchFileWriter]): @@ -970,7 +967,7 @@ def save( _check_save_filelike(f) if isinstance(f, (str, os.PathLike)): - f = os.fspath(f) # pyrefly: ignore # no-matching-overload + f = os.fspath(f) if _use_new_zipfile_serialization: with _open_zipfile_writer(f) as opened_zipfile: @@ -1524,7 +1521,6 @@ def load( else: shared = False overall_storage = torch.UntypedStorage.from_file( - # pyrefly: ignore # no-matching-overload os.fspath(f), shared, size, diff --git a/torch/utils/_cxx_pytree.py b/torch/utils/_cxx_pytree.py index 9ffd8190a5e..a0865b0c9bd 100644 --- a/torch/utils/_cxx_pytree.py +++ b/torch/utils/_cxx_pytree.py @@ -701,7 +701,6 @@ def tree_map_only( tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None, ) -> PyTree: - # pyrefly: ignore # no-matching-overload return tree_map(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf) @@ -762,7 +761,6 @@ def tree_map_only_( tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None, ) -> PyTree: - # pyrefly: ignore # no-matching-overload return tree_map_(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf) diff --git a/torch/utils/_pytree.py b/torch/utils/_pytree.py index 53ba046b3ef..759e0e61138 100644 --- a/torch/utils/_pytree.py +++ b/torch/utils/_pytree.py @@ -1555,7 +1555,6 @@ def 
tree_map_only( tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None, ) -> PyTree: - # pyrefly: ignore # no-matching-overload return tree_map(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf) @@ -1616,7 +1615,6 @@ def tree_map_only_( tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None, ) -> PyTree: - # pyrefly: ignore # no-matching-overload return tree_map_(map_only(type_or_types_or_pred)(func), tree, is_leaf=is_leaf) diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py index b0b4399f0a5..2fa5eda7fff 100644 --- a/torch/utils/cpp_extension.py +++ b/torch/utils/cpp_extension.py @@ -1531,7 +1531,7 @@ def include_paths(device_type: str = "cpu", torch_include_dirs=True) -> list[str # Support CUDA_INC_PATH env variable supported by CMake files if (cuda_inc_path := os.environ.get("CUDA_INC_PATH", None)) and \ cuda_inc_path != '/usr/include': - # pyrefly: ignore # unbound-name + paths.append(cuda_inc_path) if CUDNN_HOME is not None: paths.append(os.path.join(CUDNN_HOME, 'include')) diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py index ef0d0c20132..5e5307555e5 100644 --- a/torch/utils/data/dataloader.py +++ b/torch/utils/data/dataloader.py @@ -678,7 +678,6 @@ class _BaseDataLoaderIter: # Set pin memory device based on the current accelerator. self._pin_memory_device = ( - # pyrefly: ignore # unbound-name acc.type if self._pin_memory and (acc := torch.accelerator.current_accelerator()) is not None diff --git a/torch/xpu/__init__.py b/torch/xpu/__init__.py index 137e960afab..e95b7015f33 100644 --- a/torch/xpu/__init__.py +++ b/torch/xpu/__init__.py @@ -251,7 +251,6 @@ def get_device_capability(device: Optional[_device_t] = None) -> dict[str, Any]: # Only keep attributes that are safe for dictionary serialization. serializable_types = (int, float, bool, str, type(None), list, tuple, dict) return { - # pyrefly: ignore # unbound-name key: value for key in dir(props) if not key.startswith("__")