[1/N] Apply bugprone-unchecked-optional-access (#140679)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140679
Approved by: https://github.com/ezyang
Author: cyy
Date: 2024-11-20 04:04:38 +00:00
Committed by: PyTorch MergeBot
Parent: a4e8ca789a
Commit: d91484509a
15 changed files with 49 additions and 30 deletions
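
For context, clang-tidy's bugprone-unchecked-optional-access flags code that reads a std::optional's value without first establishing that one is present. A minimal standalone sketch of the flagged pattern and the guarded style the hunks below move toward (the function here is hypothetical, not part of this commit):

#include <iostream>
#include <optional>
#include <string>

std::string greet(const std::optional<std::string>& name) {
  // Flagged: an unguarded name.value() may throw std::bad_optional_access.
  // return "hello, " + name.value();

  // Guarded: check before use, or supply a fallback.
  if (name.has_value()) {
    return "hello, " + *name;
  }
  return "hello, " + name.value_or("stranger");
}

int main() {
  std::cout << greet(std::nullopt) << '\n';  // hello, stranger
  std::cout << greet("world") << '\n';       // hello, world
}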

@@ -52,27 +52,29 @@ class TORCH_API Context {
   const AcceleratorHooksInterface& getAcceleratorHooksInterface(
       std::optional<c10::DeviceType> opt_device_type = std::nullopt) {
-    c10::DeviceType device_type = opt_device_type.has_value()
-        ? opt_device_type.value()
-        : at::getAccelerator(true).value();
-    if (device_type == at::kCUDA) {
+    if (!opt_device_type.has_value()) {
+      opt_device_type = at::getAccelerator(true);
+    }
+    if (opt_device_type == at::kCUDA) {
       return at::detail::getCUDAHooks();
-    } else if (device_type == at::kXPU) {
+    } else if (opt_device_type == at::kXPU) {
       return at::detail::getXPUHooks();
-    } else if (device_type == at::kMPS) {
+    } else if (opt_device_type == at::kMPS) {
       return at::detail::getMPSHooks();
-    } else if (device_type == at::kPrivateUse1) {
+    } else if (opt_device_type == at::kPrivateUse1) {
       return at::detail::getPrivateUse1Hooks();
-    } else if (device_type == at::kMTIA) {
+    } else if (opt_device_type == at::kMTIA) {
       return at::detail::getMTIAHooks();
-    } else if (device_type == at::kHIP) {
+    } else if (opt_device_type == at::kHIP) {
       return at::detail::getHIPHooks();
-    } else if (device_type == at::kHPU) {
+    } else if (opt_device_type == at::kHPU) {
       return at::detail::getHPUHooks();
     } else {
       TORCH_CHECK(
           false,
-          c10::DeviceTypeName(device_type),
+          opt_device_type.has_value()
+              ? c10::DeviceTypeName(opt_device_type.value())
+              : "None",
           " device type not an accelerator.");
     }
   }
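
The rewritten branches compare the optional itself (opt_device_type == at::kCUDA) rather than a dereferenced value. This is safe because std::optional's mixed-type operator== returns false for an empty optional instead of dereferencing it; the same property underlies the options.requires_grad_opt() != true rewrite in a later hunk. A minimal standalone sketch:

#include <cassert>
#include <optional>

int main() {
  std::optional<int> o;  // empty
  assert(!(o == 3));     // empty optional compares unequal; no dereference
  assert(o != 3);        // likewise for !=, as with optional<bool> != true
  o = 3;
  assert(o == 3);        // engaged optional compares its contained value
}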

@@ -727,8 +727,9 @@ bool isFunctionalTensor(const c10::List<::std::optional<Tensor>>& t_list) {
   if (t_list.empty()) return false;
   auto functional_count = 0;
   for (const auto i : c10::irange(t_list.size())) {
-    if (!t_list[i].has_value() || !t_list[i]->defined()) continue;
-    if (isFunctionalTensor(t_list[i])) {
+    auto const& e = t_list[i];
+    if (!e.has_value() || !e->defined()) continue;
+    if (isFunctionalTensor(e)) {
       ++functional_count;
     }
   }

@@ -40,7 +40,7 @@ std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, DimnameList dim
   return result;
 }
-static void report_positional_error(
+[[noreturn]] static void report_positional_error(
     const Dimname& name,
     const Dimname& other_name,
     DimnameList names,

@@ -17,6 +17,7 @@ namespace {
 }
 static void assertSavedTensorHooksNotDisabled() {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   TORCH_CHECK(SavedTensorDefaultHooks::is_enabled(), tls.disabled_error_message.value());
 }
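
Where the surrounding logic guarantees a value but the static analysis cannot prove it, the commit suppresses the diagnostic with NOLINTNEXTLINE instead of restructuring: here, is_enabled() returning false implies disabled_error_message is set, and TORCH_CHECK only evaluates its message arguments when the condition fails. A hedged sketch of that pattern with hypothetical types:

#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

struct Hooks {
  bool enabled = true;
  std::optional<std::string> disabled_message;  // invariant: engaged whenever !enabled
};

void assert_not_disabled(const Hooks& h) {
  if (!h.enabled) {
    // Safe by the invariant above, which clang-tidy cannot see.
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    throw std::runtime_error(h.disabled_message.value());
  }
}

int main() {
  Hooks h;
  h.enabled = false;
  h.disabled_message = "saved tensor hooks are disabled";
  try {
    assert_not_disabled(h);
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << '\n';  // prints the invariant-guaranteed message
  }
}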

@@ -177,6 +177,7 @@ TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef sha
 TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef shape, IntArrayRef squash_dims) {
   declare_static_shape(shape);
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   if (static_shape_->empty()) return *this;
   for (const auto& squash_dim : squash_dims) {
     TORCH_CHECK(squash_dim >= 0 && squash_dim < static_cast<int64_t>(static_shape_->size()),

@@ -7,8 +7,7 @@ check_tensor_options_and_extract_memory_format(
     const TensorOptions& options,
     std::optional<MemoryFormat> memory_format) {
   TORCH_CHECK(
-      options.requires_grad_opt() == std::nullopt ||
-          options.requires_grad_opt().value() == false,
+      options.requires_grad_opt() != true,
       "Operators taking TensorOptions cannot take a TensorOptions with "
       "options.requires_grad set as true. This isn't implemented yet.");
   TORCH_CHECK(

@@ -85,7 +85,9 @@ struct TORCH_API ClassType : public NamedType {
       return true;
     }
     if (auto user_rhs = rhs.castRaw<ClassType>()) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       const auto& lhs_name = name().value();
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       const auto& rhs_name = user_rhs->name().value();
       return lhs_name == rhs_name &&

@@ -28,7 +28,8 @@ struct TORCH_API EnumType : public NamedType {
             std::move(enum_names_values),
             std::move(cu)));
       default:
-        TORCH_CHECK(false,
+        TORCH_CHECK(
+            false,
             "Cannot create Enum with value type '",
             value->str(),
             "', only int, float and string are supported");
@@ -49,7 +50,7 @@ struct TORCH_API EnumType : public NamedType {
   bool equals(const Type& rhs) const override {
     if (auto* enum_rhs = rhs.castRaw<EnumType>()) {
-      return name().value() == enum_rhs->name().value() &&
+      return name().has_value() && name() == enum_rhs->name() &&
           *getValueType() == *(enum_rhs->getValueType()) &&
           this->compilation_unit() == enum_rhs->compilation_unit();
     }

@@ -485,7 +485,7 @@ bool FunctionSchema::isForwardCompatibleWith(
       return false;
     }
-    auto default_val = arguments().at(i).default_value().value();
+    auto const& default_val = arguments().at(i).default_value().value();
     if (default_val.isList() || default_val.isGenericDict()) {
       if (why_not) {
         why_not

@@ -398,7 +398,7 @@ struct TORCH_API FunctionSchema {
   bool is_mutable(std::string_view name) const {
     std::optional<int> index = argumentIndexWithName(name);
     TORCH_INTERNAL_ASSERT(
-        index != std::nullopt, "Schema has no argument named ", name);
+        index.has_value(), "Schema has no argument named ", name);
     return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)});
   }

@@ -6,6 +6,7 @@
 #include <c10/util/string_view.h>
 #include <cstddef>
+#include <optional>
 #include <ostream>
 #include <sstream>
 #include <string>
@@ -56,6 +57,14 @@ inline std::ostream& _str(std::ostream& ss, const T& t) {
   return ss;
 }
+template <typename T>
+inline std::ostream& _str(std::ostream& ss, const std::optional<T>& t) {
+  if (t.has_value()) {
+    return _str(ss, t.value());
+  }
+  ss << "std::nullopt";
+  return ss;
+}
 // Overloads of _str for wide types; forces narrowing.
 C10_API std::ostream& _str(std::ostream& ss, const wchar_t* wCStr);
 C10_API std::ostream& _str(std::ostream& ss, const wchar_t& wChar);
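
The new overload lets std::optional values flow directly into c10::str-based messages; the sparse-tensor hunk below relies on this when it streams layout without calling .value(). A standalone sketch of the same behavior (print_opt is a hypothetical stand-in for the internal _str helper):

#include <iostream>
#include <optional>

// Mirrors the overload added above: print the payload if present,
// otherwise the literal "std::nullopt".
template <typename T>
std::ostream& print_opt(std::ostream& ss, const std::optional<T>& t) {
  if (t.has_value()) {
    return ss << t.value();
  }
  return ss << "std::nullopt";
}

int main() {
  std::optional<int> a = 42;
  std::optional<int> b;
  print_opt(std::cout, a) << '\n';  // 42
  print_opt(std::cout, b) << '\n';  // std::nullopt
}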

@@ -130,8 +130,9 @@ std::optional<c10::Device> compute_target_device(
   }
   for (auto& tens_list : opt_tlist_args) {
     for (const auto i : c10::irange(tens_list.size())) {
-      if (tens_list.get(i).has_value()) {
-        return tens_list.get(i)->device();
+      auto const& e = tens_list.get(i);
+      if (e.has_value()) {
+        return e->device();
       }
     }
   }

@@ -511,7 +511,8 @@ inline PyObject* toPyObject(const c10::SymInt& symint) {
     return r;
   } else {
     auto m = symint.maybe_as_int();
-    return THPUtils_packInt64(*m);
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+    return THPUtils_packInt64(m.value());
   }
 }
@@ -812,6 +813,7 @@ inline std::optional<at::Layout> PythonArgs::layoutOptional(int i) {
 inline at::Device deviceFromLong(int64_t device_index) {
   TORCH_CHECK(device_index >= 0, "Device index must not be negative");
   return at::Device(
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       at::getAccelerator(true).value(),
       static_cast<c10::DeviceIndex>(device_index));
 }

@@ -986,14 +986,14 @@ static Tensor sparse_compressed_tensor_ctor_worker(
       (required_layout
            ? r.layoutWithDefault(ARG_LAYOUT, required_layout.value())
            : r.layoutOptional(ARG_LAYOUT));
-  if (required_layout) {
+  if (required_layout.has_value()) {
     TORCH_CHECK(
-        layout.value() == required_layout.value(),
+        layout.has_value() && layout == required_layout,
         name,
         ": layout must be ",
         required_layout.value(),
         " but got ",
-        layout.value());
+        layout);
   }
   return at::sparse_compressed_tensor(
       compressed_indices,
@@ -1042,14 +1042,14 @@ static Tensor sparse_compressed_tensor_ctor_worker(
       (required_layout
            ? r.layoutWithDefault(ARG_LAYOUT1, required_layout.value())
            : r.layoutOptional(ARG_LAYOUT1));
-  if (required_layout) {
+  if (required_layout.has_value()) {
     TORCH_CHECK(
-        layout.value() == required_layout.value(),
+        layout == required_layout,
         name,
         ": layout must be ",
         required_layout.value(),
         " but got ",
-        layout.value());
+        layout);
   }
   return at::sparse_compressed_tensor(
       compressed_indices,

@@ -19,7 +19,7 @@ struct StashTorchDispatchModeGuard {
   }
   ~StashTorchDispatchModeGuard() {
-    if (saved_mode_key_ != std::nullopt) {
+    if (saved_mode_key_.has_value()) {
       c10::impl::TorchDispatchModeTLS::set_mode(
           saved_mode_, saved_mode_key_.value());
     } else {