Eliminate C10_NODISCARD (#138336)
Test Plan: Sandcastle
Reviewed By: swolchok
Pull Request resolved: https://github.com/pytorch/pytorch/pull/138336
Approved by: https://github.com/Skylion007
parent a4b6ef178c
commit 542f7c8383
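The change itself is mechanical: every occurrence of the C10_NODISCARD macro is respelled as the standard C++17 [[nodiscard]] attribute, and the macro's definition is deleted from the c10 macros header (see the @@ -115,9 +115,6 @@ hunk below). As a standalone sketch of what the attribute buys, here is a hypothetical Dict wrapper modeled loosely on the first hunk, not PyTorch's actual class:

#include <cstddef>
#include <map>
#include <string>

class Dict {
 public:
  // Returns the number of elements removed ('1' or '0'); silently ignoring
  // that result is usually a bug, and [[nodiscard]] makes the compiler warn.
  [[nodiscard]] std::size_t erase(const std::string& key) {
    return map_.erase(key);
  }

 private:
  std::map<std::string, int> map_;
};

int main() {
  Dict d;
  d.erase("missing");  // warning: ignoring return value declared 'nodiscard'
  const std::size_t removed = d.erase("missing");  // fine: result is consumed
  return static_cast<int>(removed);
}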
@@ -314,7 +314,7 @@ public:
   *
   * @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't.
   */
-  C10_NODISCARD size_t erase(const Key& key) const;
+  [[nodiscard]] size_t erase(const Key& key) const;

  /**
   * Returns the mapped value of the element with key equivalent to key.
@@ -142,8 +142,8 @@ void Dict<Key, Value>::erase(iterator iter) const {
   impl_->dict.erase(iter.entryRef_.iterator_);
 }

-template<class Key, class Value>
-C10_NODISCARD size_t Dict<Key, Value>::erase(const Key& key) const {
+template <class Key, class Value>
+[[nodiscard]] size_t Dict<Key, Value>::erase(const Key& key) const {
   return impl_->dict.erase(key);
 }
@@ -108,7 +108,7 @@ struct TORCH_API Argument {
     return is_out_;
   }

-  C10_NODISCARD const AliasInfo* alias_info() const {
+  [[nodiscard]] const AliasInfo* alias_info() const {
     return alias_info_.get();
   }
@@ -522,7 +522,7 @@ struct TORCH_API IValue final {
   }
   c10::intrusive_ptr<ivalue::Tuple> toTuple() &&;
   c10::intrusive_ptr<ivalue::Tuple> toTuple() const&;
-  C10_NODISCARD ivalue::Tuple& toTupleRef() const;
+  [[nodiscard]] ivalue::Tuple& toTupleRef() const;

   // Double
   IValue(double d) : tag(Tag::Double) {
@@ -500,7 +500,7 @@ struct TORCH_API TupleElements {
     return *this;
   }

-  C10_NODISCARD c10::ArrayRef<IValue> asArrayRef() const {
+  [[nodiscard]] c10::ArrayRef<IValue> asArrayRef() const {
     if (inlineSize_) {
       return c10::ArrayRef<IValue>(elementsInline_, inlineSize_);
     } else {
@@ -527,15 +527,15 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD bool empty() const {
+  [[nodiscard]] bool empty() const {
     return inlineSize_ ? false : elementsVector_.empty();
   }

-  C10_NODISCARD size_t size() const {
+  [[nodiscard]] size_t size() const {
     return inlineSize_ ? inlineSize_ : elementsVector_.size();
   }

-  C10_NODISCARD IValue& operator[](size_t idx) {
+  [[nodiscard]] IValue& operator[](size_t idx) {
     if (inlineSize_) {
       return elementsInline_[idx];
     } else {
@@ -543,7 +543,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD const IValue& operator[](size_t idx) const {
+  [[nodiscard]] const IValue& operator[](size_t idx) const {
     if (inlineSize_) {
       return elementsInline_[idx];
     } else {
@@ -551,7 +551,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD IValue& at(size_t idx) {
+  [[nodiscard]] IValue& at(size_t idx) {
     if (inlineSize_) {
       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3);
       TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_);
@@ -561,7 +561,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD const IValue& at(size_t idx) const {
+  [[nodiscard]] const IValue& at(size_t idx) const {
     if (inlineSize_) {
       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3);
       TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_);
@@ -572,7 +572,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD iterator begin() {
+  [[nodiscard]] iterator begin() {
     if (inlineSize_) {
       return elementsInline_;
     } else {
@@ -580,7 +580,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD iterator end() {
+  [[nodiscard]] iterator end() {
     if (inlineSize_) {
       return elementsInline_ + inlineSize_;
     } else {
@@ -588,7 +588,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD const_iterator begin() const {
+  [[nodiscard]] const_iterator begin() const {
     if (inlineSize_) {
       return elementsInline_;
     } else {
@@ -596,7 +596,7 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD const_iterator end() const {
+  [[nodiscard]] const_iterator end() const {
     if (inlineSize_) {
       return elementsInline_ + inlineSize_;
     } else {
@@ -604,27 +604,27 @@ struct TORCH_API TupleElements {
     }
   }

-  C10_NODISCARD const_iterator cbegin() const {
+  [[nodiscard]] const_iterator cbegin() const {
     return begin();
   }

-  C10_NODISCARD const_iterator cend() const {
+  [[nodiscard]] const_iterator cend() const {
     return end();
   }

-  C10_NODISCARD std::vector<IValue> vec() const & {
+  [[nodiscard]] std::vector<IValue> vec() const& {
     return asArrayRef().vec();
   }

-  C10_NODISCARD IValue& back() {
+  [[nodiscard]] IValue& back() {
     return *(end() - 1);
   }

-  C10_NODISCARD const IValue& back() const {
+  [[nodiscard]] const IValue& back() const {
     return *(end() - 1);
   }

-  C10_NODISCARD std::vector<IValue> vec() && {
+  [[nodiscard]] std::vector<IValue> vec() && {
     std::vector<IValue> result;
     result.reserve(size());
     for (auto&& iv : *this) {
@@ -103,7 +103,7 @@ class C10_API DataPtr {
   * be; be sure to read the source code of the Allocator
   * in question to confirm this.
   */
-  C10_NODISCARD bool compare_exchange_deleter(
+  [[nodiscard]] bool compare_exchange_deleter(
      DeleterFnPtr expected_deleter,
      DeleterFnPtr new_deleter) {
    return ptr_.compare_exchange_deleter(expected_deleter, new_deleter);
@@ -349,10 +349,10 @@ class DispatchKeySet final {
  }
  // Add a DispatchKey to the DispatchKey set. Does NOT mutate,
  // returns the extended DispatchKeySet!
-  C10_NODISCARD constexpr DispatchKeySet add(DispatchKey t) const {
+  [[nodiscard]] constexpr DispatchKeySet add(DispatchKey t) const {
    return *this | DispatchKeySet(t);
  }
-  C10_NODISCARD constexpr DispatchKeySet add(DispatchKeySet ks) const {
+  [[nodiscard]] constexpr DispatchKeySet add(DispatchKeySet ks) const {
    return *this | ks;
  }

@@ -380,7 +380,7 @@ class DispatchKeySet final {
  //
  // Instead, remove(DispatchKey.AutogradCPU) will only remove the "Autograd"
  // bit from the bitset.
-  C10_NODISCARD constexpr DispatchKeySet remove(DispatchKey t) const {
+  [[nodiscard]] constexpr DispatchKeySet remove(DispatchKey t) const {
    return DispatchKeySet(
        repr_ & ~(DispatchKeySet(t).repr_ & ~full_backend_mask));
  }
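Because add() and remove() return a new DispatchKeySet rather than mutating in place (as the comments above stress), a discarded return value is almost always a bug, and [[nodiscard]] now reports it at compile time. A brief usage sketch, assuming the c10/core/DispatchKeySet.h header:

#include <c10/core/DispatchKeySet.h>

c10::DispatchKeySet with_cpu(c10::DispatchKeySet ks) {
  // ks.add(c10::DispatchKey::CPU);  // would warn: result discarded, ks unchanged
  return ks.add(c10::DispatchKey::CPU);  // correct: use the returned, extended set
}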
@@ -192,8 +192,8 @@ struct C10_API TensorOptions {

  /// Return a copy of `TensorOptions` with `device` set to the given one, or
  /// cleared if `device` is `nullopt`.
-  C10_NODISCARD TensorOptions
-  device(std::optional<Device> device) const noexcept {
+  [[nodiscard]] TensorOptions device(
+      std::optional<Device> device) const noexcept {
    TensorOptions r = *this;
    r.set_device(device);
    return r;
@@ -203,7 +203,7 @@ struct C10_API TensorOptions {
  /// (This overload ensures that variadic template std::optional constructor
  /// for Device work correctly.)
  template <typename... Args>
-  C10_NODISCARD TensorOptions device(Args&&... args) const noexcept {
+  [[nodiscard]] TensorOptions device(Args&&... args) const noexcept {
    return device(
        std::optional<Device>(std::in_place, std::forward<Args>(args)...));
  }
@@ -213,22 +213,22 @@ struct C10_API TensorOptions {
  ///
  /// TODO: This function encourages bad behavior (assuming CUDA is
  /// the only device that matters). Get rid of it / rename it.
-  C10_NODISCARD TensorOptions
-  device_index(c10::DeviceIndex device_index) const noexcept {
+  [[nodiscard]] TensorOptions device_index(
+      c10::DeviceIndex device_index) const noexcept {
    return device(Device::Type::CUDA, device_index);
  }

  /// Return a copy of `TensorOptions` with `dtype` set to the given one.
-  C10_NODISCARD TensorOptions
-  dtype(std::optional<caffe2::TypeMeta> dtype) const noexcept {
+  [[nodiscard]] TensorOptions dtype(
+      std::optional<caffe2::TypeMeta> dtype) const noexcept {
    TensorOptions r = *this;
    r.set_dtype(dtype);
    return r;
  }

  // legacy function to support ScalarType
-  C10_NODISCARD TensorOptions
-  dtype(std::optional<ScalarType> dtype) const noexcept {
+  [[nodiscard]] TensorOptions dtype(
+      std::optional<ScalarType> dtype) const noexcept {
    TensorOptions r = *this;
    r.set_dtype(dtype);
    return r;
@@ -243,32 +243,32 @@ struct C10_API TensorOptions {
  }

  /// Sets the layout of the `TensorOptions`.
-  C10_NODISCARD TensorOptions
-  layout(std::optional<Layout> layout) const noexcept {
+  [[nodiscard]] TensorOptions layout(
+      std::optional<Layout> layout) const noexcept {
    TensorOptions r = *this;
    r.set_layout(layout);
    return r;
  }

  /// Sets the `requires_grad` property of the `TensorOptions`.
-  C10_NODISCARD TensorOptions
-  requires_grad(std::optional<bool> requires_grad) const noexcept {
+  [[nodiscard]] TensorOptions requires_grad(
+      std::optional<bool> requires_grad) const noexcept {
    TensorOptions r = *this;
    r.set_requires_grad(requires_grad);
    return r;
  }

  /// Sets the `pinned_memory` property on the `TensorOptions`.
-  C10_NODISCARD TensorOptions
-  pinned_memory(std::optional<bool> pinned_memory) const noexcept {
+  [[nodiscard]] TensorOptions pinned_memory(
+      std::optional<bool> pinned_memory) const noexcept {
    TensorOptions r = *this;
    r.set_pinned_memory(pinned_memory);
    return r;
  }

  /// Sets the `memory_format` property on `TensorOptions`.
-  C10_NODISCARD TensorOptions
-  memory_format(std::optional<MemoryFormat> memory_format) const noexcept {
+  [[nodiscard]] TensorOptions memory_format(
+      std::optional<MemoryFormat> memory_format) const noexcept {
    TensorOptions r = *this;
    r.set_memory_format(memory_format);
    return r;
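All of these TensorOptions setters are const and return a modified copy, so calling one and dropping the result silently does nothing; that is exactly the mistake the attribute now flags. A short usage sketch based on the signatures above:

#include <c10/core/TensorOptions.h>

c10::TensorOptions float_grad_options() {
  c10::TensorOptions opts;
  // opts.requires_grad(true);  // would warn: returns a copy, opts is unchanged
  opts = opts.requires_grad(true);  // correct: reassign the returned copy
  return opts.dtype(c10::ScalarType::Float);  // builder-style chaining works too
}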
@@ -115,9 +115,6 @@
 #define C10_HAS_CPP_ATTRIBUTE(x) (0)
 #endif

-/// C10_NODISCARD - Warn if a type or return value is discarded.
-#define C10_NODISCARD [[nodiscard]]
-
 // suppress an unused variable.
 #define C10_UNUSED [[maybe_unused]]
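The deleted definition above also shows why the macro could go: on a C++17 toolchain C10_NODISCARD always expanded to plain [[nodiscard]], so the two spellings are token-for-token identical after preprocessing. A minimal illustration:

// What the macro used to provide, per the deleted lines above:
#define C10_NODISCARD [[nodiscard]]

C10_NODISCARD int old_spelling();  // expands to: [[nodiscard]] int old_spelling();
[[nodiscard]] int new_spelling();  // what the codebase now writes directly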
@@ -81,7 +81,7 @@ class C10_API SmallVectorBase {
    return Capacity;
  }

-  C10_NODISCARD bool empty() const {
+  [[nodiscard]] bool empty() const {
    return !Size;
  }

@@ -710,7 +710,7 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T> {
    this->set_size(this->size() - NumItems);
  }

-  C10_NODISCARD T pop_back_val() {
+  [[nodiscard]] T pop_back_val() {
    T Result = ::std::move(this->back());
    this->pop_back();
    return Result;
@@ -69,7 +69,7 @@ class UniqueVoidPtr {
  std::unique_ptr<void, DeleterFnPtr>&& move_context() {
    return std::move(ctx_);
  }
-  C10_NODISCARD bool compare_exchange_deleter(
+  [[nodiscard]] bool compare_exchange_deleter(
      DeleterFnPtr expected_deleter,
      DeleterFnPtr new_deleter) {
    if (get_deleter() != expected_deleter)
@@ -149,7 +149,7 @@ class basic_string_view final {
    return std::numeric_limits<difference_type>::max();
  }

-  C10_NODISCARD constexpr bool empty() const noexcept {
+  [[nodiscard]] constexpr bool empty() const noexcept {
    return size() == 0;
  }

@@ -123,7 +123,7 @@ class IntrusivePtrNoGilDestructor {
  T* operator->() const noexcept {
    return impl_.get();
  }
-  C10_NODISCARD T* get() const noexcept {
+  [[nodiscard]] T* get() const noexcept {
    return impl_.get();
  }
  void reset() noexcept {
@@ -22,10 +22,10 @@ struct TORCH_API InputSpec {
  explicit InputSpec(const c10::IValue& value);

  // Serialize the spec into an IValue.
-  C10_NODISCARD c10::IValue serialize() const;
+  [[nodiscard]] c10::IValue serialize() const;

  // Check whether the input tensor adheres to the spec.
-  C10_NODISCARD bool validate(const at::Tensor& input) const;
+  [[nodiscard]] bool validate(const at::Tensor& input) const;

  std::vector<int64_t> sizes_;
  c10::ScalarType dtype_{c10::ScalarType::Undefined};
@@ -40,10 +40,10 @@ struct TORCH_API OutputSpec {
  explicit OutputSpec(const c10::IValue& value);

  // Serialize the spec into an IValue.
-  C10_NODISCARD c10::IValue serialize() const;
+  [[nodiscard]] c10::IValue serialize() const;

  // Allocate an output tensor in accordance with the spec.
-  C10_NODISCARD at::Tensor allocate() const;
+  [[nodiscard]] at::Tensor allocate() const;

  std::vector<int64_t> sizes_;
  c10::ScalarType dtype_{c10::ScalarType::Undefined};
@@ -84,7 +84,7 @@ struct TORCH_API MemoryPlan {

  explicit MemoryPlan(const c10::IValue& value);

-  C10_NODISCARD c10::IValue serialize() const;
+  [[nodiscard]] c10::IValue serialize() const;

  void allocate(ExecutionState* state) const;

@@ -207,10 +207,10 @@ class TORCH_API CompilationUnit {
  // Serialize all registered functions into an IValue. The IValue will be save
  // into the compiled TorchScript model file ahead-of-time on the host, and
  // will be deserialized at runtime on the target device.
-  C10_NODISCARD c10::IValue serialize() const;
+  [[nodiscard]] c10::IValue serialize() const;

  // Execute a registered function.
-  C10_NODISCARD c10::impl::GenericList run(
+  [[nodiscard]] c10::impl::GenericList run(
      const c10::QualifiedName& function_name,
      const c10::impl::GenericList& inputs) const;

@@ -218,7 +218,7 @@ class TORCH_API CompilationUnit {
  void register_function(std::unique_ptr<Function> fn);

 private:
-  C10_NODISCARD Function* find_function(const c10::QualifiedName& qn) const;
+  [[nodiscard]] Function* find_function(const c10::QualifiedName& qn) const;

  std::unordered_map<c10::QualifiedName, std::unique_ptr<Function>> functions_;
 };
@@ -336,7 +336,7 @@ void TypeParser::advance() {
  lex();
 }

-C10_NODISCARD c10::string_view TypeParser::cur() const {
+[[nodiscard]] c10::string_view TypeParser::cur() const {
  return next_token_;
 }

@@ -33,7 +33,7 @@ class TORCH_API TypeParser {
  std::string next();
  c10::string_view nextView();
  void advance();
-  C10_NODISCARD c10::string_view cur() const;
+  [[nodiscard]] c10::string_view cur() const;

  std::string pythonStr_;
  size_t start_;
@@ -43,7 +43,7 @@ class ProcessedNodeInputs {
    }
  }

-  C10_NODISCARD uint16_t size() const {
+  [[nodiscard]] uint16_t size() const {
    if (C10_LIKELY(repr_.is_inline())) {
      return repr_.inline_repr_.size;
    } else {
@@ -51,7 +51,7 @@ class ProcessedNodeInputs {
    }
  }

-  C10_NODISCARD bool empty() const {
+  [[nodiscard]] bool empty() const {
    return size() == 0;
  }

@@ -93,11 +93,11 @@ class ProcessedNodeInputs {
    HeapArrayPtr(HeapArrayPtr&&) noexcept = default;
    HeapArrayPtr& operator=(HeapArrayPtr&&) noexcept = default;

-    C10_NODISCARD bool empty() const {
+    [[nodiscard]] bool empty() const {
      return size() != 0;
    }

-    C10_NODISCARD uint16_t size() const {
+    [[nodiscard]] uint16_t size() const {
      return array_ ? array_[0] : 0;
    }

@@ -137,7 +137,7 @@ class ProcessedNodeInputs {
  // awkward.
#pragma pack(push, 2)
  union Repr {
-    C10_NODISCARD bool is_inline() const {
+    [[nodiscard]] bool is_inline() const {
      uint8_t tag = 0;
      // Use of reinterpret_cast to pointer to char or unsigned char
      // is defined behavior; see
@@ -456,7 +456,7 @@ class TORCH_API StaticModule {
    return num_inputs() + num_constants() + num_intermediate_values();
  }

-  C10_NODISCARD const std::vector<uint16_t>& output_indices() const {
+  [[nodiscard]] const std::vector<uint16_t>& output_indices() const {
    return output_indices_;
  }

@@ -488,7 +488,7 @@ class TORCH_API StaticModule {
    });
  }

-  C10_NODISCARD Node* findNodeWithKindForTesting(const std::string& kind) const;
+  [[nodiscard]] Node* findNodeWithKindForTesting(const std::string& kind) const;

  const std::optional<c10::FunctionSchema>& schema() const {
    return schema_;
@@ -644,7 +644,7 @@ class TORCH_API BlockRunner {
  }

  // Output is readonly. The writing process happens inside ProcessedNodes
-  C10_NODISCARD const IValue& Output(uint32_t i) const {
+  [[nodiscard]] const IValue& Output(uint32_t i) const {
    DCHECK(i < outputs_.size());
    return *outputs_[i];
  }
@@ -923,7 +923,7 @@ class TORCH_API ProcessedNode {
  }

  // Input is readonly
-  C10_NODISCARD const IValue& Input(uint32_t i) const {
+  [[nodiscard]] const IValue& Input(uint32_t i) const {
    return values_[inputs_[i]];
  }

@@ -933,7 +933,7 @@ class TORCH_API ProcessedNode {
    return values_[outputs_offset_ + i];
  }

-  C10_NODISCARD const IValue& Output(uint32_t i) const {
+  [[nodiscard]] const IValue& Output(uint32_t i) const {
    DCHECK(i < num_outputs());
    return values_[outputs_offset_ + i];
  }
@@ -943,12 +943,12 @@ class TORCH_API ProcessedNode {
    return static_cast<uint32_t>(fn_->num_outputs());
  }

-  C10_NODISCARD c10::ArrayRef<const IValue> outputs() const {
+  [[nodiscard]] c10::ArrayRef<const IValue> outputs() const {
    return c10::ArrayRef<const IValue>(
        values_ + outputs_offset_, num_outputs());
  }

-  C10_NODISCARD uint16_t num_inputs() const {
+  [[nodiscard]] uint16_t num_inputs() const {
    return inputs_.size();
  }

@@ -990,7 +990,7 @@ class TORCH_API ProcessedNode {
    values_ = values;
  }

-  C10_NODISCARD uint16_t output_ivalue_index(uint16_t i) const {
+  [[nodiscard]] uint16_t output_ivalue_index(uint16_t i) const {
    DCHECK(i < num_outputs());
    return outputs_offset_ + i;
  }
@@ -1019,9 +1019,9 @@ class TORCH_API ProcessedNode {
  }

 private:
-  C10_NODISCARD bool verify_outputs_dont_overlap_each_other() const;
+  [[nodiscard]] bool verify_outputs_dont_overlap_each_other() const;

-  C10_NODISCARD bool verify_inputs_dont_overlap_outputs(bool force_check) const;
+  [[nodiscard]] bool verify_inputs_dont_overlap_outputs(bool force_check) const;

  Node* node_;
  const ProcessedFunction* fn_;
@@ -172,15 +172,15 @@ class MemoryPlanner {
    return managed_output_tensors_.size();
  }

-  C10_NODISCARD size_t total_num_unmanaged() const {
+  [[nodiscard]] size_t total_num_unmanaged() const {
    return num_unmanaged_non_scalars() + num_unmanaged_scalars();
  }

-  C10_NODISCARD size_t num_unmanaged_non_scalars() const {
+  [[nodiscard]] size_t num_unmanaged_non_scalars() const {
    return unmanaged_ivalues_.size() + unmanaged_borrowed_ivalues_.size();
  }

-  C10_NODISCARD size_t num_unmanaged_scalars() const {
+  [[nodiscard]] size_t num_unmanaged_scalars() const {
    return num_unmanaged_scalar_ivalues_;
  }

|
|
@ -32,15 +32,15 @@ class TORCH_API DetachedBuffer final {
|
||||||
: data_(data), size_(size), data_owner_(internal_data_owner) {}
|
: data_(data), size_(size), data_owner_(internal_data_owner) {}
|
||||||
|
|
||||||
/// Returns a pointer to the data.
|
/// Returns a pointer to the data.
|
||||||
C10_NODISCARD void* data() {
|
[[nodiscard]] void* data() {
|
||||||
return data_;
|
return data_;
|
||||||
}
|
}
|
||||||
/// Returns a pointer to the data.
|
/// Returns a pointer to the data.
|
||||||
C10_NODISCARD const void* data() const {
|
[[nodiscard]] const void* data() const {
|
||||||
return data_;
|
return data_;
|
||||||
}
|
}
|
||||||
/// Returns the size of the data, in bytes.
|
/// Returns the size of the data, in bytes.
|
||||||
C10_NODISCARD size_t size() const {
|
[[nodiscard]] size_t size() const {
|
||||||
return size_;
|
return size_;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||