diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index 75c1c79ee5c..e7a1c5712c1 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -52,27 +52,29 @@ class TORCH_API Context {
   const AcceleratorHooksInterface& getAcceleratorHooksInterface(
       std::optional<c10::DeviceType> opt_device_type = std::nullopt) {
-    c10::DeviceType device_type = opt_device_type.has_value()
-        ? opt_device_type.value()
-        : at::getAccelerator(true).value();
-    if (device_type == at::kCUDA) {
+    if (!opt_device_type.has_value()) {
+      opt_device_type = at::getAccelerator(true);
+    }
+    if (opt_device_type == at::kCUDA) {
       return at::detail::getCUDAHooks();
-    } else if (device_type == at::kXPU) {
+    } else if (opt_device_type == at::kXPU) {
       return at::detail::getXPUHooks();
-    } else if (device_type == at::kMPS) {
+    } else if (opt_device_type == at::kMPS) {
       return at::detail::getMPSHooks();
-    } else if (device_type == at::kPrivateUse1) {
+    } else if (opt_device_type == at::kPrivateUse1) {
       return at::detail::getPrivateUse1Hooks();
-    } else if (device_type == at::kMTIA) {
+    } else if (opt_device_type == at::kMTIA) {
       return at::detail::getMTIAHooks();
-    } else if (device_type == at::kHIP) {
+    } else if (opt_device_type == at::kHIP) {
       return at::detail::getHIPHooks();
-    } else if (device_type == at::kHPU) {
+    } else if (opt_device_type == at::kHPU) {
       return at::detail::getHPUHooks();
     } else {
       TORCH_CHECK(
           false,
-          c10::DeviceTypeName(device_type),
+          opt_device_type.has_value()
+              ? c10::DeviceTypeName(opt_device_type.value())
+              : "None",
           " device type not an accelerator.");
     }
   }
 
diff --git a/aten/src/ATen/FunctionalTensorWrapper.cpp b/aten/src/ATen/FunctionalTensorWrapper.cpp
index c16c29ed58a..c72bd3453e2 100644
--- a/aten/src/ATen/FunctionalTensorWrapper.cpp
+++ b/aten/src/ATen/FunctionalTensorWrapper.cpp
@@ -727,8 +727,9 @@ bool isFunctionalTensor(const c10::List<::std::optional<Tensor>>& t_list) {
   if (t_list.empty()) return false;
   auto functional_count = 0;
   for (const auto i : c10::irange(t_list.size())) {
-    if (!t_list[i].has_value() || !t_list[i]->defined()) continue;
-    if (isFunctionalTensor(t_list[i])) {
+    auto const& e = t_list[i];
+    if (!e.has_value() || !e->defined()) continue;
+    if (isFunctionalTensor(e)) {
       ++functional_count;
     }
   }
diff --git a/aten/src/ATen/NamedTensorUtils.cpp b/aten/src/ATen/NamedTensorUtils.cpp
index a07ddabfca9..f8cd819f484 100644
--- a/aten/src/ATen/NamedTensorUtils.cpp
+++ b/aten/src/ATen/NamedTensorUtils.cpp
@@ -40,7 +40,7 @@ std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, DimnameList dim
   return result;
 }
 
-static void report_positional_error(
+[[noreturn]] static void report_positional_error(
     const Dimname& name,
     const Dimname& other_name,
     DimnameList names,
diff --git a/aten/src/ATen/SavedTensorHooks.cpp b/aten/src/ATen/SavedTensorHooks.cpp
index 871d9df0c92..0313849f788 100644
--- a/aten/src/ATen/SavedTensorHooks.cpp
+++ b/aten/src/ATen/SavedTensorHooks.cpp
@@ -17,6 +17,7 @@ namespace {
 }
 
 static void assertSavedTensorHooksNotDisabled() {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   TORCH_CHECK(SavedTensorDefaultHooks::is_enabled(), tls.disabled_error_message.value());
 }
 
diff --git a/aten/src/ATen/TensorIterator.cpp b/aten/src/ATen/TensorIterator.cpp
index c151c8d7731..4ecbe2fe23c 100644
--- a/aten/src/ATen/TensorIterator.cpp
+++ b/aten/src/ATen/TensorIterator.cpp
@@ -177,6 +177,7 @@ TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef sha
 
 TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef shape, IntArrayRef squash_dims) {
   declare_static_shape(shape);
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   if (static_shape_->empty()) return *this;
   for (const auto& squash_dim : squash_dims) {
     TORCH_CHECK(squash_dim >= 0 && squash_dim < static_cast<int64_t>(static_shape_->size()),
diff --git a/aten/src/ATen/core/CheckMemoryFormat.h b/aten/src/ATen/core/CheckMemoryFormat.h
index cc2167ea811..860eec8e7a1 100644
--- a/aten/src/ATen/core/CheckMemoryFormat.h
+++ b/aten/src/ATen/core/CheckMemoryFormat.h
@@ -7,8 +7,7 @@ check_tensor_options_and_extract_memory_format(
     const TensorOptions& options,
     std::optional<MemoryFormat> memory_format) {
   TORCH_CHECK(
-      options.requires_grad_opt() == std::nullopt ||
-          options.requires_grad_opt().value() == false,
+      options.requires_grad_opt() != true,
       "Operators taking TensorOptions cannot take a TensorOptions with "
       "options.requires_grad set as true. This isn't implemented yet.");
   TORCH_CHECK(
diff --git a/aten/src/ATen/core/class_type.h b/aten/src/ATen/core/class_type.h
index c4223443274..810efb7b6ec 100644
--- a/aten/src/ATen/core/class_type.h
+++ b/aten/src/ATen/core/class_type.h
@@ -85,7 +85,9 @@ struct TORCH_API ClassType : public NamedType {
       return true;
     }
     if (auto user_rhs = rhs.castRaw<ClassType>()) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       const auto& lhs_name = name().value();
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       const auto& rhs_name = user_rhs->name().value();
       return lhs_name == rhs_name &&
           this->compilation_unit() == user_rhs->compilation_unit();
diff --git a/aten/src/ATen/core/enum_type.h b/aten/src/ATen/core/enum_type.h
index 4d61be51e04..e0268262c0a 100644
--- a/aten/src/ATen/core/enum_type.h
+++ b/aten/src/ATen/core/enum_type.h
@@ -28,7 +28,8 @@ struct TORCH_API EnumType : public NamedType {
             std::move(enum_names_values),
             std::move(cu)));
       default:
-        TORCH_CHECK(false,
+        TORCH_CHECK(
+            false,
             "Cannot create Enum with value type '",
             value->str(),
             "', only int, float and string are supported");
@@ -49,7 +50,7 @@ struct TORCH_API EnumType : public NamedType {
 
   bool equals(const Type& rhs) const override {
     if (auto* enum_rhs = rhs.castRaw<EnumType>()) {
-      return name().value() == enum_rhs->name().value() &&
+      return name().has_value() && name() == enum_rhs->name() &&
           *getValueType() == *(enum_rhs->getValueType()) &&
           this->compilation_unit() == enum_rhs->compilation_unit();
     }
diff --git a/aten/src/ATen/core/function_schema.cpp b/aten/src/ATen/core/function_schema.cpp
index cebab59d066..3d66252045b 100644
--- a/aten/src/ATen/core/function_schema.cpp
+++ b/aten/src/ATen/core/function_schema.cpp
@@ -485,7 +485,7 @@ bool FunctionSchema::isForwardCompatibleWith(
       return false;
     }
 
-    auto default_val = arguments().at(i).default_value().value();
+    auto const& default_val = arguments().at(i).default_value().value();
     if (default_val.isList() || default_val.isGenericDict()) {
       if (why_not) {
         why_not
diff --git a/aten/src/ATen/core/function_schema.h b/aten/src/ATen/core/function_schema.h
index 02ed59b7a22..e7c8e7adfa4 100644
--- a/aten/src/ATen/core/function_schema.h
+++ b/aten/src/ATen/core/function_schema.h
@@ -398,7 +398,7 @@ struct TORCH_API FunctionSchema {
 
   bool is_mutable(std::string_view name) const {
     std::optional<int> index = argumentIndexWithName(name);
     TORCH_INTERNAL_ASSERT(
-        index != std::nullopt, "Schema has no argument named ", name);
+        index.has_value(), "Schema has no argument named ", name);
     return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)});
   }
diff --git a/c10/util/StringUtil.h b/c10/util/StringUtil.h
index 8289fe453f4..41f80496f7a 100644
--- a/c10/util/StringUtil.h
+++ b/c10/util/StringUtil.h
@@ -6,6 +6,7 @@
 
 #include <cstddef>
 #include <cstring>
+#include <optional>
 #include <ostream>
 #include <sstream>
 #include <string>
@@ -56,6 +57,14 @@ inline std::ostream& _str(std::ostream& ss, const T& t) {
   return ss;
 }
 
+template <typename T>
+inline std::ostream& _str(std::ostream& ss, const std::optional<T>& t) {
+  if (t.has_value()) {
+    return _str(ss, t.value());
+  }
+  ss << "std::nullopt";
+  return ss;
+}
 // Overloads of _str for wide types; forces narrowing.
 C10_API std::ostream& _str(std::ostream& ss, const wchar_t* wCStr);
 C10_API std::ostream& _str(std::ostream& ss, const wchar_t& wChar);
diff --git a/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp b/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp
index ca7f8e97ae3..055ca006528 100644
--- a/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp
+++ b/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp
@@ -130,8 +130,9 @@ std::optional<c10::Device> compute_target_device(
   }
   for (auto& tens_list : opt_tlist_args) {
     for (const auto i : c10::irange(tens_list.size())) {
-      if (tens_list.get(i).has_value()) {
-        return tens_list.get(i)->device();
+      auto const& e = tens_list.get(i);
+      if (e.has_value()) {
+        return e->device();
       }
     }
   }
diff --git a/torch/csrc/utils/python_arg_parser.h b/torch/csrc/utils/python_arg_parser.h
index 4bc98a9676c..34246fb2f1f 100644
--- a/torch/csrc/utils/python_arg_parser.h
+++ b/torch/csrc/utils/python_arg_parser.h
@@ -511,7 +511,8 @@ inline PyObject* toPyObject(const c10::SymInt& symint) {
     return r;
   } else {
     auto m = symint.maybe_as_int();
-    return THPUtils_packInt64(*m);
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+    return THPUtils_packInt64(m.value());
   }
 }
 
@@ -812,6 +813,7 @@ inline std::optional<at::Layout> PythonArgs::layoutOptional(int i) {
 inline at::Device deviceFromLong(int64_t device_index) {
   TORCH_CHECK(device_index >= 0, "Device index must not be negative");
   return at::Device(
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       at::getAccelerator(true).value(),
       static_cast<c10::DeviceIndex>(device_index));
 }
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index e6371498314..12ae293cf78 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -986,14 +986,14 @@ static Tensor sparse_compressed_tensor_ctor_worker(
       (required_layout
            ? r.layoutWithDefault(ARG_LAYOUT, required_layout.value())
            : r.layoutOptional(ARG_LAYOUT));
-  if (required_layout) {
+  if (required_layout.has_value()) {
     TORCH_CHECK(
-        layout.value() == required_layout.value(),
+        layout.has_value() && layout == required_layout,
         name,
         ": layout must be ",
         required_layout.value(),
         " but got ",
-        layout.value());
+        layout);
   }
   return at::sparse_compressed_tensor(
       compressed_indices,
@@ -1042,14 +1042,14 @@ static Tensor sparse_compressed_tensor_ctor_worker(
       (required_layout
           ? r.layoutWithDefault(ARG_LAYOUT1, required_layout.value())
          : r.layoutOptional(ARG_LAYOUT1));
-  if (required_layout) {
+  if (required_layout.has_value()) {
     TORCH_CHECK(
-        layout.value() == required_layout.value(),
+        layout == required_layout,
         name,
         ": layout must be ",
         required_layout.value(),
         " but got ",
-        layout.value());
+        layout);
   }
   return at::sparse_compressed_tensor(
       compressed_indices,
diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h
index 8fe5404b44a..45e567b9ee7 100644
--- a/torch/csrc/utils/torch_dispatch_mode.h
+++ b/torch/csrc/utils/torch_dispatch_mode.h
@@ -19,7 +19,7 @@ struct StashTorchDispatchModeGuard {
   }
 
   ~StashTorchDispatchModeGuard() {
-    if (saved_mode_key_ != std::nullopt) {
+    if (saved_mode_key_.has_value()) {
       c10::impl::TorchDispatchModeTLS::set_mode(
           saved_mode_, saved_mode_key_.value());
     } else {
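The recurring issue fixed above is bugprone-unchecked-optional-access: calling value() or operator* on a std::optional that may be empty. The patch applies three remedies: test has_value() before unwrapping, compare the optional directly against a value (an empty optional simply compares unequal, as in `options.requires_grad_opt() != true`), or add a NOLINTNEXTLINE where an invariant outside the function guarantees presence. The standalone sketch below illustrates the first two patterns; it is not PyTorch code, and the names describe/is_one are invented for the example.

#include <iostream>
#include <optional>
#include <string>

// Pattern 1: guard with has_value() before calling value(); unguarded
// value() on an empty optional throws, and operator* is undefined behavior.
std::string describe(const std::optional<int>& v) {
  if (v.has_value()) {
    return std::to_string(v.value());
  }
  return "std::nullopt";  // mirrors the _str overload added to StringUtil.h
}

// Pattern 2: compare the optional directly; an empty optional compares
// unequal to any value, so no unwrapping is needed.
bool is_one(const std::optional<int>& v) {
  return v == 1;
}

int main() {
  std::optional<int> none;
  std::optional<int> some = 7;
  std::cout << describe(none) << ' ' << describe(some) << '\n';  // std::nullopt 7
  std::cout << is_one(none) << ' ' << is_one(some) << '\n';      // 0 0
  std::cout << is_one(1) << '\n';                                // 1
}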