[3/N] Apply bugprone-unchecked-optional-access (#142442)
Fixes #ISSUE_NUMBER
Pull Request resolved: https://github.com/pytorch/pytorch/pull/142442
Approved by: https://github.com/albanD
This commit is contained in:
parent
2b105de2c1
commit
7d98b3dcee
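For context: clang-tidy's bugprone-unchecked-optional-access warns when a std::optional is dereferenced (via unary * or ->) without an emptiness check on the same value. Below is a minimal, self-contained sketch of the pattern the check flags and the checked alternative this series applies; the maybe_get_answer helper is hypothetical and not part of this diff.

#include <iostream>
#include <optional>

// Hypothetical helper, for illustration only (not from the PyTorch diff).
std::optional<int> maybe_get_answer(bool ready) {
  if (!ready) {
    return std::nullopt;
  }
  return 42;
}

int main() {
  std::optional<int> ans = maybe_get_answer(false);

  // Would be flagged by bugprone-unchecked-optional-access:
  // the optional is dereferenced with no has_value() check in sight.
  // std::cout << *ans << '\n';

  // The shape this commit moves the code toward: check first, then access.
  if (ans.has_value()) {
    std::cout << ans.value() << '\n';
  } else {
    std::cout << "no value" << '\n';
  }
  return 0;
}

Much of the diff below follows this pattern: unary * dereferences become .value() calls or gain explicit has_value() guards, which lets many of the NOLINTNEXTLINE(bugprone-unchecked-optional-access) suppressions go away.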
@@ -38,7 +38,6 @@ TEST(ListTestIValueBasedList, whenCallingGetWithExistingPosition_thenReturnsElem

TEST(ListTestIValueBasedList, whenCallingGetWithNonExistingPosition_thenThrowsException) {
  List<string> list({"3", "4"});
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.get(2), std::out_of_range);
}

@@ -56,7 +55,6 @@ TEST(ListTestIValueBasedList, whenCallingExtractWithExistingPosition_thenListEle

TEST(ListTestIValueBasedList, whenCallingExtractWithNonExistingPosition_thenThrowsException) {
  List<string> list({"3", "4"});
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.extract(2), std::out_of_range);
}

@@ -79,14 +77,12 @@ TEST(ListTestIValueBasedList, whenCallingMovingSetWithExistingPosition_thenChang

TEST(ListTestIValueBasedList, whenCallingCopyingSetWithNonExistingPosition_thenThrowsException) {
  List<string> list({"3", "4"});
  string value = "5";
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.set(2, value), std::out_of_range);
}

TEST(ListTestIValueBasedList, whenCallingMovingSetWithNonExistingPosition_thenThrowsException) {
  List<string> list({"3", "4"});
  string value = "5";
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.set(2, std::move(value)), std::out_of_range);
}

@@ -122,7 +118,6 @@ TEST(ListTestIValueBasedList, whenSwappingFromAccessOperator_thenSwapsElements)

TEST(ListTestIValueBasedList, whenCallingAccessOperatorWithNonExistingPosition_thenThrowsException) {
  List<string> list({"3", "4"});
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list[2], std::out_of_range);
}

@@ -586,7 +581,6 @@ TEST(ListTestNonIValueBasedList, whenCallingGetWithExistingPosition_thenReturnsE

TEST(ListTestNonIValueBasedList, whenCallingGetWithNonExistingPosition_thenThrowsException) {
  List<int64_t> list({3, 4});
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.get(2), std::out_of_range);
}

@@ -598,7 +592,6 @@ TEST(ListTestNonIValueBasedList, whenCallingExtractWithExistingPosition_thenRetu

TEST(ListTestNonIValueBasedList, whenCallingExtractWithNonExistingPosition_thenThrowsException) {
  List<int64_t> list({3, 4});
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.extract(2), std::out_of_range);
}

@@ -622,14 +615,12 @@ TEST(ListTestNonIValueBasedList, whenCallingMovingSetWithExistingPosition_thenCh

TEST(ListTestNonIValueBasedList, whenCallingCopyingSetWithNonExistingPosition_thenThrowsException) {
  List<int64_t> list({3, 4});
  int64_t value = 5;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list.set(2, value), std::out_of_range);
}

TEST(ListTestNonIValueBasedList, whenCallingMovingSetWithNonExistingPosition_thenThrowsException) {
  List<int64_t> list({3, 4});
  int64_t value = 5;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,performance-move-const-arg,hicpp-avoid-goto)
  EXPECT_THROW(list.set(2, std::move(value)), std::out_of_range);
}

@@ -665,7 +656,6 @@ TEST(ListTestNonIValueBasedList, whenSwappingFromAccessOperator_thenSwapsElement

TEST(ListTestNonIValueBasedList, whenCallingAccessOperatorWithNonExistingPosition_thenThrowsException) {
  List<int64_t> list({3, 4});
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
  EXPECT_THROW(list[2], std::out_of_range);
}

@@ -1134,12 +1124,11 @@ TEST(ListTest, canAccessOptionalStringByReference) {
      "List<std::optional<std::string>> access should be by const reference");
  std::optional<std::string> str1 = list[1];
  std::optional<std::string> str2 = list[2];
  decltype(auto) strRef1 = listRef[1];
  decltype(auto) strRef2 = listRef[2];
  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
  EXPECT_EQ("two", str1.value());
  auto const& strRef1 = listRef[1];
  auto const& strRef2 = listRef[2];
  EXPECT_EQ("two", str1);
  EXPECT_FALSE(str2.has_value());
  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
  EXPECT_TRUE(strRef1.has_value());
  EXPECT_EQ("two", strRef1.value().get());
  EXPECT_FALSE(strRef2.has_value());
}

@@ -80,7 +80,8 @@ DynamicType::~DynamicType() {

std::shared_ptr<const DynamicType> DynamicType::create(const Type& other) {
  if (auto dynRaw = other.castRaw<DynamicType>()) {
    TORCH_INTERNAL_ASSERT(!dynRaw->weak_from_this().expired(),
    TORCH_INTERNAL_ASSERT(
        !dynRaw->weak_from_this().expired(),
        "Error creating dynamic type instance not managed by shared_ptr: ",
        other.str());
  }

@@ -92,7 +93,8 @@ std::shared_ptr<const DynamicType> DynamicType::create(const Type& other) {

DynamicTypePtr DynamicType::create(Type& other) {
  if (auto dynRaw = other.castRaw<DynamicType>()) {
    TORCH_INTERNAL_ASSERT(!dynRaw->weak_from_this().expired(),
    TORCH_INTERNAL_ASSERT(
        !dynRaw->weak_from_this().expired(),
        "Error creating dynamic type instance not managed by shared_ptr: ",
        other.str());
  }

@@ -262,7 +264,7 @@ TypePtr DynamicType::fallback() const {
    fields.reserve(arguments_.elems.size());
    for (const auto& elem : arguments_.elems) {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      fields.emplace_back(*elem.label);
      fields.emplace_back(elem.label.value());
    }
    return TupleType::createNamed(*name_, fields, fallbacks);
  }

@@ -292,7 +294,7 @@ TypePtr DynamicType::fallback() const {
      return StorageType::get();
    case Tag::Var:
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      return VarType::create(*name_);
      return VarType::create(name_.value());
    case Tag::AnyClass:
      return AnyClassType::get();
    case Tag::QScheme:

@@ -592,8 +592,8 @@ struct TORCH_API TensorType : public SharedType {
  static TensorTypePtr create(
      std::optional<at::ScalarType> scalar_type,
      std::optional<Device> device,
      const SymbolicShape& sizes,
      const VaryingShape<Stride>& stride_,
      SymbolicShape sizes,
      VaryingShape<Stride> stride_,
      std::optional<bool> requires_grad,
      std::optional<bool> undefined = false);

@@ -73,7 +73,7 @@ Library::Library(Kind kind, std::string ns, std::optional<c10::DispatchKey> k, c
      registrars_.emplace_back(
          c10::Dispatcher::singleton().registerLibrary(
              // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
              *ns_, debugString(file_, line_)
              ns_.value(), debugString(file_, line_)
          )
      );
      [[fallthrough]];

@@ -207,12 +207,10 @@ at::OperatorName Library::_parseNameForLib(const char* name_str) const {
  // This is a copy paste of Library::_impl
  if (ns_opt.has_value()) {
    // See Note [Redundancy in registration code is OK]
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    TORCH_CHECK(*ns_opt == *ns_,
    TORCH_CHECK(ns_opt == ns_,
        IMPL_PRELUDE,
        "Explicitly provided namespace (", *ns_opt, ") in operator name "
        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
        "does not match namespace of enclosing ", toString(kind_), " block (", *ns_, "). "
        "Explicitly provided namespace (", ns_opt, ") in operator name "
        "does not match namespace of enclosing ", toString(kind_), " block (", ns_, "). "
        "Move this definition to the ", toString(kind_), " block corresponding to this namespace "
        "(and consider deleting the namespace from your schema string.) ",
        ERROR_CONTEXT

@@ -75,9 +75,9 @@ std::ostream& operator<<(std::ostream& out, const VaryingShape<T>& vs) {
    if (i > 0) {
      out << ", ";
    }
    if (vs[i].has_value()) {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      out << vs[i].value();
    auto const& v = vs[i];
    if (v.has_value()) {
      out << v.value();
    } else {
      out << "*";
    }

@@ -98,20 +98,20 @@ std::ostream& operator<<(
    const SymbolicShape& ss) {
  // TODO: Unranked SymbolicShape printing is ambiguous with that of
  // dynamic-shaped vector.
  if(!ss.rank()) {
  auto rank_opt = ss.rank();
  if(!rank_opt.has_value()) {
    os << "(*)";
    return os;
  }

  auto sizes = ss.sizes().value();
  auto sizes_opt = ss.sizes();

  os << "(";
  for (size_t i = 0; i < ss.rank().value(); i++) {
  for (size_t i = 0; i < rank_opt.value(); i++) {
    if (i > 0) {
      os << ", ";
    }
    if(sizes[i].is_static()) {
      os << sizes[i];
    if(sizes_opt.has_value() && sizes_opt.value()[i].is_static()) {
      os << sizes_opt.value()[i];
    } else {
      os << "*";
    }

@@ -280,25 +280,21 @@ TensorTypePtr TensorType::create(
    const VaryingShape<int64_t>& strides,
    std::optional<bool> requires_grad,
    std::optional<bool> undefined, bool tensor_contiguity) {
  if(strides.concrete_sizes() && strides.concrete_sizes().has_value()){
  const auto stride_concrete_sizes = strides.concrete_sizes();
  if(stride_concrete_sizes.has_value()){
    const auto size_concrete_sizes = sizes.concrete_sizes();
    // handles case where strides are set
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    TORCH_INTERNAL_ASSERT(sizes.concrete_sizes()->size() == strides.concrete_sizes()->size());
    auto sprops = strides.concrete_sizes().has_value()
        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
        ? computeStrideProps(*sizes.concrete_sizes(), *strides.concrete_sizes(), tensor_contiguity)
        : VaryingShape<Stride>();
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    auto symbol_sizes = SymbolicShape(*sizes.concrete_sizes());
    TORCH_INTERNAL_ASSERT(size_concrete_sizes.has_value() && size_concrete_sizes->size() == stride_concrete_sizes->size());
    auto sprops =
        computeStrideProps(*size_concrete_sizes, *stride_concrete_sizes, tensor_contiguity);
    auto symbol_sizes = SymbolicShape(*size_concrete_sizes);
    return TensorType::create(
        scalar_type, device, symbol_sizes, sprops, requires_grad, undefined);
  } else {
    // strides are all null, but still have number of strides equal to number of ranks
    TORCH_INTERNAL_ASSERT(sizes.sizes() && sizes.size());
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    auto symbol_sizes = SymbolicShape(*sizes.sizes());
    return TensorType::create(
        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
        scalar_type, device, symbol_sizes, VaryingShape<Stride>(*sizes.size()), requires_grad, undefined);
  }
}

@@ -306,12 +302,12 @@ TensorTypePtr TensorType::create(
TensorTypePtr TensorType::create(
    std::optional<at::ScalarType> scalar_type,
    std::optional<Device> device,
    const SymbolicShape& sizes,
    const VaryingShape<Stride>& strides,
    SymbolicShape sizes,
    VaryingShape<Stride> strides,
    std::optional<bool> requires_grad,
    std::optional<bool> undefined) {
  auto pt = TensorTypePtr(new TensorType(
      scalar_type, device, sizes, strides, requires_grad, undefined));
      scalar_type, device, std::move(sizes), std::move(strides), requires_grad, undefined));
  return pt;
}

@@ -371,7 +367,7 @@ TensorTypePtr TensorType::merge(const TensorType& other, bool merge_sizes) const
}

template <typename T>
bool is_null_or_equal(std::optional<T> a, c10::IntArrayRef b) {
static bool is_null_or_equal(std::optional<T> a, c10::IntArrayRef b) {
  return !a.has_value() || a.value() == b;
}

@@ -414,15 +410,16 @@ bool TensorType::equals(const c10::Type& rhs) const {
}

VaryingShape<int64_t> TensorType::strides() const {
  if (!strides_.size().has_value()) {
  auto const strides_sizes = strides_.sizes();
  if (!strides_sizes.has_value()) {
    return VaryingShape<int64_t>();
  }
  std::vector<std::optional<int64_t>> ss(*strides_.size());
  for (size_t i = 0; i < *strides_.size(); i++) {
    if (!strides_[i].has_value()) {
  std::vector<std::optional<int64_t>> ss(strides_sizes->size());
  for (auto const& stride:strides_sizes.value()) {
    if (!stride.has_value()) {
      continue;
    }
    auto s = *strides_[i];
    const auto& s = *stride;
    if (s.stride_index_.has_value() && s.stride_.has_value()) {
      ss[*s.stride_index_] = *s.stride_;
    }

@@ -186,7 +186,7 @@ OptionalType::OptionalType(const TypePtr& contained)
    std::vector<TypePtr> to_subtract{NoneType::get()};
    auto without_none = subtractTypeSetFrom(to_subtract, types_);
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    contained_ = UnionType::create({*without_none});
    contained_ = UnionType::create({std::move(without_none.value())});
  }
  has_free_variables_ = contained_->hasFreeVariables();
}

@@ -76,8 +76,7 @@ PyObject* THPSize_NewFromSymSizes(const at::Tensor& self_) {
        throw python_error();
      PyTuple_SET_ITEM(ret.get(), i, py_size_tensor);
    } else {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      PyTuple_SET_ITEM(ret.get(), i, THPUtils_packInt64(*m));
      PyTuple_SET_ITEM(ret.get(), i, THPUtils_packInt64(m.value()));
    }
  }
}

@@ -262,7 +262,6 @@ auto ReadyQueue::pop() -> NodeTask {
  // Lock mutex for accesses to heap_
  std::unique_lock<std::mutex> lock(mutex_);
  not_empty_.wait(lock, [this] { return !heap_.empty(); });
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
  auto task = std::move(const_cast<NodeTask&>(heap_.top()));
  heap_.pop();
  return task;

@@ -735,14 +734,14 @@ void GraphTask::exec_post_processing() {
      // the stashed streams should be enough. If leaf_stream.device_index()
      // happens to be for a new device, operator* on the std::nullopt should
      // throw an error.
      const auto caller_current_stream =
          // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
          *caller_current_streams_[leaf_stream.device_index()];
      const auto& caller_current_stream =
          caller_current_streams_[leaf_stream.device_index()];

      if (caller_current_stream != leaf_stream) {
      if (caller_current_stream.has_value() &&
          caller_current_stream != leaf_stream) {
        auto event = c10::Event{leaf_stream.device_type()};
        event.record(leaf_stream);
        caller_current_stream.wait(event);
        caller_current_stream->wait(event);
      }
    }

@@ -158,15 +158,15 @@ void InputBuffer::add(
  // the consumer or producer.
  // Accumulation happens on the var device's default stream.

  TORCH_INTERNAL_ASSERT(device_of(var));
  auto const device = device_of(var);
  TORCH_INTERNAL_ASSERT(device.has_value());
  std::optional<c10::Stream> opt_accumulate_stream = std::nullopt;
  const auto device_type = device_of(var).value().type();
  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
  if (device_of(var)->is_cuda() || device_of(var)->is_privateuseone()) {
  const auto device_type = device->type();
  if (device->is_cuda() || device->is_privateuseone()) {
    const auto on_producer =
        opt_producer_stream && device_of(var) == opt_producer_stream->device();
        opt_producer_stream && device == opt_producer_stream->device();
    const auto on_consumer =
        opt_consumer_stream && device_of(var) == opt_consumer_stream->device();
        opt_consumer_stream && device == opt_consumer_stream->device();

    if (on_producer && on_consumer) {
      // (2a)

@@ -192,8 +192,7 @@ void InputBuffer::add(
      opt_sync_stream = opt_producer_stream;
    } else {
      // (5)
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      opt_accumulate_stream = guard.getDefaultStream(*device_of(var));
      opt_accumulate_stream = guard.getDefaultStream(*device);
    }
    if (opt_sync_stream && (opt_accumulate_stream != opt_sync_stream)) {
      // (3b), (4b)

@@ -217,7 +216,7 @@ void InputBuffer::add(
  } else {
    // (1) non-CUDA/privateuse1 variable
    // Accumulation happens on variable's device
    c10::OptionalDeviceGuard device_guard{device_of(var)};
    c10::OptionalDeviceGuard device_guard{device};
    accumulate(buffer, pos, std::move(var));
  }
}

@@ -457,8 +457,9 @@ ExtraFields<EventType::PyCall>::args_t ValueCache::load<
  OptimizerInfo info{
      key, cls, cache.cls_names_.at(cls), cls_and_parameters.parameters_};
  return {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      /*frame_state_=*/std::get<CallType::PyCall>(state_).at(*cache.location_),
      /*frame_state_=*/std::get<CallType::PyCall>(state_).at(
          // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
          cache.location_.value()),
      /*module_info_=*/std::nullopt,
      /*optimizer_info_=*/std::move(info)};
}

@@ -726,6 +726,7 @@ struct TORCH_API DifferentiableViewMeta : public AutogradMeta {
  const ViewInfo& get_backward_view() const {
    TORCH_CHECK(
        has_bw_view(), "backward view info can only exist for backward views.");
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    return backward_info_.value();
  }

@@ -763,6 +764,7 @@ struct TORCH_API DifferentiableViewMeta : public AutogradMeta {
    TORCH_CHECK(
        !shared_view_info_ || has_bw_view(),
        "forward view info can only exist for forward views.");
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    return shared_view_info_ ? backward_info_.value() : forward_info_.value();
  }

@@ -1162,8 +1162,8 @@ void Reducer::initialize_buckets(
    // Make gradient type in the reduced precision if mixed precision is
    // enabled. This ensures that the type is correct when e.g. rebuilding
    // buckets.
    if (mixed_precision_param_dtype_) {
      options = options.dtype(*mixed_precision_param_dtype_);
    if (mixed_precision_param_dtype_.has_value()) {
      options = options.dtype(mixed_precision_param_dtype_);
    }
    bucket.gradients = at::empty({static_cast<long>(offset)}, options);

@@ -1625,8 +1625,9 @@ void Reducer::finalize_backward() {
    // sparse metadata is set so the bucket should have sparse_tensor_indices
    if (sparse_metadata_) {
      REDUCER_CHECK(
          bucket.sparse_tensor_indices.value().numel() ==
              bucket.gradients.sizes()[0],
          bucket.sparse_tensor_indices.has_value() &&
              bucket.sparse_tensor_indices.value().numel() ==
                  bucket.gradients.sizes()[0],
          logger_,
          "Sparse metadata and gradient size mismatch");
      auto sparse_result = at::sparse_coo_tensor(

@@ -1689,7 +1690,7 @@ void Reducer::finalize_backward() {

void Reducer::runGradCallbackForVariable(
    at::Tensor& variable,
    GradCallback&& cb) {
    const GradCallback& cb) {
#ifdef _WIN32
  cb(variable.mutable_grad());
#else

@@ -308,7 +308,7 @@ class TORCH_API Reducer {
          GradCallback,
          torch::distributed::autograd::DistAutogradContext::GradCallback>);
#endif
  void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);
  void runGradCallbackForVariable(at::Tensor& variable, const GradCallback& cb);

  // This function is called inside `initialize_buckets()`. It initializes both
  // `bucket_views_in` and `bucket_views_out` with views for each variable's

@@ -23,7 +23,7 @@ uint64_t SequenceNum::get() const {

void SequenceNum::increment() {
  std::lock_guard<std::mutex> lock(lock_);
  TORCH_CHECK(num_ != std::nullopt);
  TORCH_CHECK(num_.has_value());
  num_ = ++(*num_);
}

@@ -32,7 +32,7 @@ void SequenceNum::increment() {
uint64_t SequenceNum::getAndIncrement() {
  uint64_t curVal = 0;
  std::lock_guard<std::mutex> lock(lock_);
  TORCH_CHECK(num_ != std::nullopt);
  TORCH_CHECK(num_.has_value());
  curVal = *num_;
  num_ = ++(*num_);
  return curVal;

@@ -45,7 +45,7 @@ void SequenceNum::set(const uint64_t num) {

bool SequenceNum::isSet() const {
  std::lock_guard<std::mutex> lock(lock_);
  return num_ != std::nullopt;
  return num_.has_value();
}

SequenceNum& SequenceNum::operator=(const SequenceNum& other) {

@@ -35,7 +35,7 @@ bool ScriptCall::hasQualifiedName() const {
}

const c10::QualifiedName& ScriptCall::qualifiedName() const {
  return *qualifiedName_;
  return qualifiedName_.value();
}

const std::vector<at::IValue>& ScriptCall::stack() const {

@@ -122,8 +122,9 @@ BackendDataPtr LazyTensor::GetDataHandle() {
    if (data()->ir_value) {
      ApplyPendingGraph();
    } else {
      TORCH_CHECK(data()->tensor_data);
      data()->handle = TensorToDataHandle(*data()->tensor_data, GetDevice());
      auto const& tensor_data = data()->tensor_data;
      TORCH_CHECK(tensor_data.has_value());
      data()->handle = TensorToDataHandle(*tensor_data, GetDevice());
    }

    return data()->handle;

@@ -205,8 +205,9 @@ bool LTCTensorImpl::is_contiguous_custom(c10::MemoryFormat _unused) const {
  // TODO(ezyang): I don't think this branch is actually necessary
  // TODO(ezyang): I don't think this logic is right, shouldn't we pass on
  // the memory format?
  if (tensor_->CurrentTensorData()) {
    return tensor_->CurrentTensorData()->is_contiguous();
  const auto data = tensor_->CurrentTensorData();
  if (data.has_value()) {
    return data->is_contiguous();
  }
  // Only check that the storage is already contiguous.
  TORCH_CHECK(is_contiguous_, "Non-contiguous storage for lazy tensor");

@@ -128,7 +128,7 @@ void calculateUniqueTensorIDs(
  for (const auto& t : tensors) {
    if (t.impl_ != NoTensorImpl) {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      tensor_set.insert(*t.allocation_id_ref_.get());
      tensor_set.insert(t.allocation_id_ref_.get().value());
    }
  }
  tensors.erase(

@@ -136,7 +136,7 @@ void calculateUniqueTensorIDs(
      tensors.begin(),
      tensors.end(),
      [&tensor_set](const auto& i) {
        auto it = tensor_set.find(*i.allocation_id_ref_.get());
        auto it = tensor_set.find(i.allocation_id_ref_.get().value());
        return it == tensor_set.end();
      }),
      tensors.end());

@@ -188,7 +188,7 @@ void calculateUniqueTensorIDs(
  // --------------------------------------------------------------------------
  for (const auto& t : tensors) {
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    const auto id = id_map.at(*t.allocation_id_ref_.get());
    const auto id = id_map.at(t.allocation_id_ref_.get().value());
    t.id_ref_.get().emplace(TensorID(id));
  }
}

@@ -14,13 +14,11 @@ void check_out_type_matches(
  if (scalarType_is_none && !layout && device_is_none) { // common case
    return;
  }
  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
  if (!scalarType_is_none && result.scalar_type() != scalarType.value()) {
  if (!scalarType_is_none && result.scalar_type() != scalarType) {
    TORCH_CHECK(
        false,
        "dtype ",
        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
        *scalarType,
        scalarType,
        " does not match dtype of out parameter (",
        result.scalar_type(),
        ")");

@@ -58,7 +58,7 @@ py::handle type_caster<c10::SymInt>::cast(
  } else {
    auto m = si.maybe_as_int();
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    return py::cast(*m).release();
    return py::cast(m.value()).release();
  }
}