Fixup c10 headers with clang-tidy (#91407)

Clang-tidy was not applied properly to headers in c10, as documented in #91406. These are the easy automated fixes that came out of applying clang-tidy to the c10 part of the codebase. cc @ezyang
Pull Request resolved: https://github.com/pytorch/pytorch/pull/91407
Approved by: https://github.com/ezyang
Aaron Gokaslan 2022-12-28 11:12:22 +00:00 committed by PyTorch MergeBot
parent c470ad4f4a
commit 700941f683
12 changed files with 27 additions and 23 deletions
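
The hunks below fall into a handful of mechanical patterns: defaulted `override` destructors, default member initializers, initialized locals that are later passed as out-parameters, const-reference loop variables, and dropped `std::move` calls on arguments that are copied anyway. The following minimal standalone sketch shows those before/after patterns for orientation only: the `Widget` and `report_total` names are made up, and the clang-tidy check names in the comments are my best guess at which checks produce each fix (the PR itself does not list them).

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

struct Base {
  virtual ~Base() = default;
};

struct Widget : Base {
  // modernize-use-override / modernize-use-equals-default:
  // "~Widget() override = default;" instead of "virtual ~Widget() {}".
  ~Widget() override = default;

  // cppcoreguidelines-pro-type-member-init: default member initializers so a
  // default-constructed Widget holds no indeterminate values.
  uint16_t offset{};
  uint16_t mask{};
};

// performance-unnecessary-value-param / performance-move-const-arg:
// take the string by const reference and drop the pointless std::move.
[[noreturn]] inline void throw_out_of_range(const std::string& msg) {
  throw std::out_of_range(msg);
}

inline std::size_t report_total(const std::vector<std::string>& names) {
  std::size_t total = 0;  // cppcoreguidelines-init-variables: initialize locals.
  // performance-for-range-copy: bind by const reference to avoid copying each element.
  for (const std::string& name : names) {
    total += name.size();
  }
  return total;
}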

View File

@@ -226,7 +226,7 @@ struct AllocatorRegisterer {
 // per device
 struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase {
   MemoryReportingInfoBase();
-  virtual ~MemoryReportingInfoBase() {}
+  ~MemoryReportingInfoBase() override = default;
   /**
    * alloc_size corresponds to the size of the ptr.

View File

@@ -14,12 +14,12 @@ struct FunctionalityOffsetAndMask {
   FunctionalityOffsetAndMask(uint16_t offset, uint16_t mask)
       : offset(offset), mask(mask) {}
   // This needs to big enough to cover the size of the operator table.
-  uint16_t offset;
+  uint16_t offset{};
   // See Note [No More Than 16 Backends]
   // This mask needs to be big enough to mask all of the backend bits.
   // We probably don't ever want to have more than 16 backend bits, so uint16_t
   // should be enough.
-  uint16_t mask;
+  uint16_t mask{};
 };
 static_assert(
     c10::num_runtime_entries < 65536,

View File

@@ -66,7 +66,7 @@ struct TORCH_API InferenceMode {
     DispatchKeySet excluded = enabled
         ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset)
         : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset);
-    c10::impl::PODLocalDispatchKeySet cur_keyset;
+    c10::impl::PODLocalDispatchKeySet cur_keyset{};
     cur_keyset.set_included(included);
     cur_keyset.set_excluded(excluded);
     c10::impl::_force_tls_local_dispatch_key_set(cur_keyset);

View File

@@ -292,7 +292,7 @@ class C10_API Scalar {
   Tag tag;
   union v_t {
-    double d;
+    double d{};
    int64_t i;
    c10::complex<double> z;
    c10::intrusive_ptr_target* p;

View File

@@ -19,7 +19,7 @@ inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) {
 inline c10::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
     c10::SymIntArrayRef ar) {
-  for (c10::SymInt sci : ar) {
+  for (const c10::SymInt& sci : ar) {
     if (sci.is_symbolic()) {
       return c10::nullopt;
     }
@@ -32,7 +32,7 @@ inline at::IntArrayRef asIntArrayRefSlow(
     c10::SymIntArrayRef ar,
     const char* file,
     int64_t line) {
-  for (c10::SymInt sci : ar) {
+  for (const c10::SymInt& sci : ar) {
     TORCH_CHECK(
         !sci.is_symbolic(),
         file,

View File

@@ -582,7 +582,7 @@ inline TensorOptions layout(Layout layout) {
 /// Convenience function that returns a `TensorOptions` object with the `device`
 /// set to the given one.
 inline TensorOptions device(Device device) {
-  return TensorOptions().device(std::move(device));
+  return TensorOptions().device(device);
 }
 /// Convenience function that returns a `TensorOptions` object with the

View File

@@ -76,8 +76,8 @@ struct __array_traits<_Tp, 0> final {
   }
 };
-[[noreturn]] inline void __throw_out_of_range(std::string msg) {
-  throw std::out_of_range(std::move(msg));
+[[noreturn]] inline void __throw_out_of_range(const std::string& msg) {
+  throw std::out_of_range(msg);
 }
 } // namespace detail

View File

@@ -11,14 +11,16 @@ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
 namespace c10 {
 /// Constructors
-inline C10_HOST_DEVICE BFloat16::BFloat16(float value) {
+inline C10_HOST_DEVICE BFloat16::BFloat16(float value)
+    :
 #if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 && \
     defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
-  x = __bfloat16_as_ushort(__float2bfloat16(value));
+      x(__bfloat16_as_ushort(__float2bfloat16(value)))
 #else
   // RNE by default
-  x = detail::round_to_nearest_even(value);
+      x(detail::round_to_nearest_even(value))
 #endif
+{
 }
 /// Implicit conversions

View File

@@ -27,14 +27,16 @@ namespace c10 {
 /// Constructors
-inline C10_HOST_DEVICE Half::Half(float value) {
+inline C10_HOST_DEVICE Half::Half(float value)
+    :
 #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
-  x = __half_as_short(__float2half(value));
+      x(__half_as_short(__float2half(value)))
 #elif defined(__SYCL_DEVICE_ONLY__)
-  x = sycl::bit_cast<uint16_t>(sycl::half(value));
+      x(sycl::bit_cast<uint16_t>(sycl::half(value)))
 #else
-  x = detail::fp16_ieee_from_fp32_value(value);
+      x(detail::fp16_ieee_from_fp32_value(value))
 #endif
+{
 }
 /// Implicit conversions
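
Both the BFloat16 and Half hunks above apply the same pattern: the member `x` is moved from an assignment in the constructor body into the member initializer list, with the `:` and the opening `{` kept outside the preprocessor conditional so that each branch contributes exactly one initializer. Here is a compilable sketch of that shape, using an invented `Fixed16` type, a made-up `MY_FAST_PATH` macro, and stand-in encode helpers in place of the real CUDA/SYCL intrinsics.

#include <cstdint>

// Stand-ins for the hardware and software conversion routines.
inline uint16_t fast_encode(float value) {
  return static_cast<uint16_t>(value);
}
inline uint16_t slow_encode(float value) {
  return static_cast<uint16_t>(value + 0.5f);
}

struct Fixed16 {
  uint16_t x;

  // The ':' and the '{' sit outside the #if/#else block, so each branch only
  // supplies the single initializer expression for x, and x is initialized in
  // the member initializer list rather than assigned in the constructor body.
  explicit Fixed16(float value)
      :
#if defined(MY_FAST_PATH)
        x(fast_encode(value))
#else
        x(slow_encode(value))
#endif
  {
  }
};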

View File

@@ -172,7 +172,7 @@ class bad_optional_access : public std::logic_error {
 template <class T>
 union storage_t {
-  unsigned char dummy_;
+  unsigned char dummy_{};
   T value_;
 #if __cplusplus >= 202002L

View File

@@ -451,7 +451,7 @@ class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
   void growAndAssign(size_t NumElts, const T& Elt) {
     // Grow manually in case Elt is an internal reference.
-    size_t NewCapacity;
+    size_t NewCapacity = 0;
     T* NewElts = mallocForGrow(NumElts, NewCapacity);
     std::uninitialized_fill_n(NewElts, NumElts, Elt);
     this->destroy_range(this->begin(), this->end());
@@ -462,7 +462,7 @@ class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
   template <typename... ArgTypes>
   T& growAndEmplaceBack(ArgTypes&&... Args) {
     // Grow manually in case one of Args is an internal reference.
-    size_t NewCapacity;
+    size_t NewCapacity = 0;
     T* NewElts = mallocForGrow(0, NewCapacity);
     ::new ((void*)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
     moveElementsForGrow(NewElts);
@@ -493,7 +493,7 @@ class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
 // Define this out-of-line to dissuade the C++ compiler from inlining it.
 template <typename T, bool TriviallyCopyable>
 void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
-  size_t NewCapacity;
+  size_t NewCapacity = 0;
   T* NewElts = mallocForGrow(MinSize, NewCapacity);
   moveElementsForGrow(NewElts);
   takeAllocationForGrow(NewElts, NewCapacity);

View File

@@ -668,7 +668,7 @@ class basic_string_view final {
   };
   const_pointer begin_;
-  size_type size_;
+  size_type size_{};
 };
 template <class CharT>