Modernize C++ code in aten/src/ATen/ (#141424)

Clang-tidy's modernize checks were applied; most of the resulting changes collapse nested namespace blocks into C++17 nested namespace definitions, plus a handful of related cleanups (defaulted constructors, bool literals, `<cstdint>`-style headers, and std::move of by-value parameters).
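
As an illustrative sketch of that dominant pattern (the declaration is hypothetical, not taken from the commit), the C++17 nested namespace definition replaces one block per namespace component with a single block:

// Before: one block per namespace component.
namespace at {
namespace native {
void example_kernel(); // hypothetical declaration
} // namespace native
} // namespace at

// After: a single C++17 nested namespace definition.
namespace at::native {
void example_kernel(); // hypothetical declaration
} // namespace at::native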

Pull Request resolved: https://github.com/pytorch/pytorch/pull/141424
Approved by: https://github.com/eqy
Authored by cyy on 2024-11-24 02:15:17 +00:00; committed by PyTorch MergeBot
parent ba5253da9b
commit 4c1f50af5f
81 changed files with 131 additions and 277 deletions

View File

@ -12,8 +12,7 @@
#include <c10/util/Metaprogramming.h>
#include <type_traits>
namespace c10 {
namespace impl {
namespace c10::impl {
//
// utils
@ -391,5 +390,4 @@ struct BoxedKernelWrapper<
}
};
} // impl
} // c10
} // namespace c10::impl

View File

@ -4,8 +4,7 @@
namespace c10 {
namespace detail {
namespace infer_schema {
namespace detail::infer_schema {
namespace {
std::vector<Argument> createArgumentVector(c10::ArrayRef<ArgumentDef> args) {
@ -40,7 +39,6 @@ FunctionSchema make_function_schema(
c10::ArrayRef<ArgumentDef> returns) {
return make_function_schema("", "", arguments, returns);
}
} // namespace infer_schema
} // namespace detail
} // namespace detail::infer_schema
std::optional<std::string> findSchemaDifferences(

View File

@ -9,9 +9,7 @@
#include <c10/util/Metaprogramming.h>
namespace c10 {
namespace detail {
namespace infer_schema {
namespace detail::infer_schema {
/// The templated inference code creates `ArgumentDef` instead of `Argument`,
/// because that can be constructed at compile time and has a much smaller
@ -142,7 +140,6 @@ FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, st
return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
}
}
}
} // namespace detail::infer_schema
template<class FuncType>

View File

@ -1,7 +1,7 @@
#include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
#include <iostream>
namespace at { namespace cuda {
namespace at::cuda {
NVRTC* load_nvrtc() {
auto self = new NVRTC();
@ -10,4 +10,4 @@ NVRTC* load_nvrtc() {
return self;
}
}} // at::cuda
} // at::cuda

View File

@ -4,7 +4,7 @@
#include <cuda.h>
#include <nvrtc.h>
namespace at { namespace cuda {
namespace at::cuda {
// NOTE [ USE OF NVRTC AND DRIVER API ]
@ -132,4 +132,4 @@ extern "C" typedef struct NVRTC {
} NVRTC;
extern "C" TORCH_CUDA_CPP_API NVRTC* load_nvrtc();
}} // at::cuda
} // at::cuda

View File

@ -5,7 +5,7 @@
// Use of c10::hip namespace here makes hipification easier, because
// I don't have to also fix namespaces. Sorry!
namespace c10 { namespace hip {
namespace c10::hip {
// Takes a valid HIPAllocator (of any sort) and turns it into
// an allocator pretending to be a CUDA allocator. See
@ -28,4 +28,4 @@ public:
}
};
}} // namespace c10::hip
} // namespace c10::hip

View File

@ -155,7 +155,7 @@ C10_DEVICE scalar_t binomial_inversion(scalar_t count, scalar_t prob, BaseSample
accscalar_t logprob = compat_log1p(-prob);
while (1) {
while (true) {
U = standard_uniform.sample();
accscalar_t geom = compat_ceil(compat_log(U) / logprob);
geom_sum += geom;
@ -185,7 +185,7 @@ C10_DEVICE scalar_t btrs(scalar_t count, scalar_t prob, BaseSampler<accscalar_t,
const accscalar_t alpha = (2.83 + 5.1 / b) * stddev;
const accscalar_t m = compat_floor((count + 1) * prob);
while (1) {
while (true) {
U = standard_uniform.sample() - 0.5;
V = standard_uniform.sample();
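
The two hunks above replace the integer literal in while (1) with the bool literal, presumably via clang-tidy's modernize-use-bool-literals fix. A minimal self-contained sketch with a hypothetical loop body:

#include <cstdio>

int main() {
  int iterations = 0;
  // "while (true)" states the intent directly; "while (1)" relies on an
  // implicit int-to-bool conversion that the modernize checks flag.
  while (true) {
    if (++iterations == 3) {
      break; // sampling loops like btrs() exit via break/return, not the condition
    }
  }
  std::printf("iterations = %d\n", iterations);
  return 0;
}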

View File

@ -1,8 +1,7 @@
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
namespace at {
namespace native {
namespace at::native {
inline void check_pixel_shuffle_shapes(const Tensor& self, int64_t upscale_factor) {
TORCH_CHECK(self.dim() >= 3,
@ -44,4 +43,4 @@ inline void check_pixel_unshuffle_shapes(const Tensor& self, int64_t downscale_f
downscale_factor);
}
}} // namespace at::native
} // namespace at::native

View File

@ -63,7 +63,7 @@ inline C10_DEVICE scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
#define compat_pow std::pow
#endif
namespace at { namespace native {
namespace at::native {
namespace detail {
@ -539,7 +539,7 @@ struct MinMaxOps {
#endif
};
}} // namespace at::native
} // namespace at::native
#undef MAX
#undef MIN

View File

@ -8,8 +8,7 @@
#include <ATen/native/ao_sparse/quantized/cpu/packed_params.h>
#include <ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h>
namespace ao {
namespace sparse {
namespace ao::sparse {
int register_linear_params() {
static auto register_linear_params =
torch::selective_class_<LinearPackedParamsBase>(
@ -42,4 +41,4 @@ int register_linear_params() {
namespace {
[[maybe_unused]] static auto linear_params = register_linear_params();
} // namespace
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -90,7 +90,6 @@ struct TORCH_API PackedLinearWeight
#endif // USE_FBGEMM
namespace ao {
namespace sparse {
namespace ao::sparse {
int register_linear_params();
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -4,8 +4,7 @@
#include <ATen/core/ivalue.h>
namespace ao {
namespace sparse {
namespace ao::sparse {
// <Weight, bias, out_features_block_size, in_features_block_size>
using LinearPackedSerializationType =
@ -72,4 +71,4 @@ struct LinearPackedParamsBase : public torch::jit::CustomClassHolder {
const int64_t out_features_block_size_, in_features_block_size_;
};
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -15,8 +15,7 @@
#include <ATen/ops/empty.h>
#endif
namespace ao {
namespace sparse {
namespace ao::sparse {
int register_linear_params();
@ -257,4 +256,4 @@ TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
}
} // namespace
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -7,8 +7,7 @@
#include <ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h>
#endif
namespace ao {
namespace sparse {
namespace ao::sparse {
namespace {
constexpr int64_t serialization_version_index [[maybe_unused]] = 0;
@ -317,5 +316,4 @@ PackedLinearWeightQnnp::PackedLinearWeightQnnp(
}
#endif // USE_PYTORCH_QNNPACK
} // namespace sparse
} // namespace ao
} // namespace ao::sparse

View File

@ -18,8 +18,7 @@
#include <ATen/ops/empty.h>
#endif
namespace ao {
namespace sparse {
namespace ao::sparse {
int register_linear_params();
@ -195,4 +194,4 @@ TORCH_LIBRARY_IMPL(sparse, CPU, m) {
}
} // namespace
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -18,8 +18,7 @@
#include <algorithm>
namespace ao {
namespace sparse {
namespace ao::sparse {
int register_linear_params();
@ -246,4 +245,4 @@ TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
TORCH_FN(QLinearPackWeightInt8::run));
}
} // namespace
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -9,8 +9,7 @@
#include <utility>
#endif
namespace ao {
namespace sparse {
namespace ao::sparse {
namespace {
/**
@ -245,5 +244,4 @@ BCSRSerializationType PackedLinearWeightQnnp::serialize() {
#endif // USE_PYTORCH_QNNPACK
} // namespace sparse
} // namespace ao
} // namespace ao::sparse

View File

@ -15,8 +15,7 @@
#include <ATen/ops/from_blob.h>
#endif
namespace ao {
namespace sparse {
namespace ao::sparse {
int register_linear_params();
#ifdef USE_FBGEMM
@ -139,4 +138,4 @@ TORCH_LIBRARY_IMPL(sparse, CatchAll, m) {
TORCH_FN(QLinearUnpackWeightInt8::run));
}
} // namespace
}} // namespace ao::sparse
} // namespace ao::sparse

View File

@ -11,8 +11,7 @@
#include <pack_block_sparse.h>
#include <ATen/native/ao_sparse/quantized/cpu/packed_params.h>
namespace ao {
namespace sparse {
namespace ao::sparse {
struct TORCH_API PackedLinearWeightQnnp
: public LinearPackedParamsBase {
@ -87,6 +86,6 @@ struct TORCH_API PackedLinearWeightQnnp
at::Tensor apply_dynamic_impl(const at::Tensor& input);
};
}} // namespace ao::sparse
} // namespace ao::sparse
#endif // USE_PYTORCH_QNNPACK

View File

@ -7,7 +7,7 @@ struct TensorIteratorBase;
class TensorBase;
}
namespace at { namespace native {
namespace at::native {
void launch_glu_backward_kernel(const TensorIteratorBase& iter,
int64_t gI_stride, int64_t I_stride);
@ -17,4 +17,4 @@ void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter);
void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate);
}} // namespace at::native
} // namespace at::native

View File

@ -6,8 +6,7 @@ namespace at {
class TensorBase;
}
namespace at {
namespace native {
namespace at::native {
void launch_grid_sampler_2d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
@ -29,4 +28,4 @@ void launch_grid_sampler_3d_backward_kernel(
const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool, 2> output_mask);
}} // namespace at::native
} // namespace at::native

View File

@ -7,10 +7,9 @@ struct TensorIteratorBase;
class TensorBase;
}
namespace at {
namespace native {
namespace at::native {
/// @param maskPrefixSum[in,out]
void launch_masked_scatter_kernel(
const TensorBase &self, const TensorBase &mask,
const TensorBase &maskPrefixSum, const TensorBase &source);
}}
}

View File

@ -7,7 +7,7 @@ namespace c10 {
class Scalar;
}
namespace at { namespace native {
namespace at::native {
void norm_launch_kernel(TensorIterator &iter, double val);
void min_launch_kernel(TensorIterator &iter);
@ -17,4 +17,4 @@ void min_all_launch_kernel(TensorIterator &iter);
void max_all_launch_kernel(TensorIterator &iter);
void aminmax_allreduce_launch_kernel(TensorIterator &iter);
}} // namespace at::native
} // namespace at::native

View File

@ -5,8 +5,7 @@ namespace at {
class TensorBase;
}
namespace at {
namespace native {
namespace at::native {
void launch_kthvalue_kernel(
const TensorBase &values, const TensorBase &indices,
@ -15,4 +14,4 @@ void launch_median_kernel(
const TensorBase &vals, const TensorBase &inds,
const TensorBase &in, int64_t dim, bool ignore_nan);
}} // namespace at::native
} // namespace at::native

View File

@ -5,8 +5,7 @@ namespace at {
class TensorBase;
}
namespace at {
namespace native {
namespace at::native {
void launch_fused_mode_kernel(
const TensorBase &values, const TensorBase &indices,
@ -16,4 +15,4 @@ void launch_apply_mode_kernel(
const TensorBase &values, const TensorBase &indices,
const TensorBase &self, int64_t dim, int64_t ndim);
}} // namespace at::native
} // namespace at::native

View File

@ -5,10 +5,9 @@ namespace at {
class TensorBase;
}
namespace at {
namespace native {
namespace at::native {
void launch_gather_topk_kernel(
const TensorBase& self,
int64_t k, int64_t dim, bool largest,
const TensorBase& values, const TensorBase& indices);
}}
}

View File

@ -12,8 +12,7 @@
#include <ATen/ops/cudnn_convolution_transpose_native.h>
#endif
namespace at {
namespace native {
namespace at::native {
// ---------------------------------------------------------------------
//
@ -244,5 +243,4 @@ Tensor cudnn_convolution_add_relu(
#endif // AT_CUDNN_ENABLED
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -1,8 +1,7 @@
#pragma once
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace at::native {
void run_cudnn_SDP_fprop(
int64_t b,
@ -47,5 +46,4 @@ void run_cudnn_SDP_bprop(
const Tensor& dropoutseed,
const Tensor& dropoutoffset);
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -6,7 +6,7 @@
namespace at::native::metal {
struct Conv2DParams final {
Conv2DParams() {}
Conv2DParams() = default;
Conv2DParams(
c10::IntArrayRef inputSizes,
c10::IntArrayRef weightSizes,
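
The Conv2DParams() {} to Conv2DParams() = default change above is clang-tidy's modernize-use-equals-default. A defaulted constructor documents that the compiler-generated one is wanted, and unlike a user-provided empty body it can remain trivial. A small sketch with hypothetical structs:

#include <type_traits>

struct UserProvided {
  UserProvided() {} // user-provided: never trivial, even with an empty body
  int h, w;
};

struct Defaulted {
  Defaulted() = default; // compiler-generated: stays trivial here
  int h, w;
};

static_assert(!std::is_trivially_default_constructible_v<UserProvided>);
static_assert(std::is_trivially_default_constructible_v<Defaulted>);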

View File

@ -1,8 +1,7 @@
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/macros/Macros.h>
namespace at {
namespace detail {
namespace at::detail {
struct MetalGuardImpl final : public c10::impl::DeviceGuardImplInterface {
MetalGuardImpl() = default;
@ -60,5 +59,4 @@ struct MetalGuardImpl final : public c10::impl::DeviceGuardImplInterface {
C10_REGISTER_GUARD_IMPL(Metal, MetalGuardImpl)
} // namespace detail
} // namespace at
} // namespace at::detail

View File

@ -17,7 +17,7 @@
#if !AT_ROCM_ENABLED()
namespace at { namespace native {
namespace at::native {
// See Note [ATen preprocessor philosophy]
@ -33,7 +33,7 @@ std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm_backward(
TORCH_CHECK(false, "miopen_batch_norm_backward: ATen not compiled with MIOpen support");
}
}} // namespace at::native
} // namespace at::native
#else // AT_ROCM_ENABLED

View File

@ -25,7 +25,7 @@
#if !AT_ROCM_ENABLED()
namespace at { namespace native {
namespace at::native {
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
const Tensor& input_r, TensorList weight, int64_t weight_stride0,
@ -46,7 +46,7 @@ namespace at { namespace native {
TORCH_CHECK(false, "miopen_rnn_backward: ATen not compiled with MIOpen support.");
}
}} //namespace at::native
} //namespace at::native
#else // AT_ROCM_ENABLED()

View File

@ -62,7 +62,7 @@ void mkl_gemm_f16f16f32(
#include <mkl.h>
#include <c10/util/irange.h>
namespace at { namespace native {
namespace at::native {
static CBLAS_TRANSPOSE to_cblas(TransposeType x) {
switch (x) {
@ -151,7 +151,7 @@ void mkl_gemm_f16f16f32(
#endif
}
}} // namespace at::native
} // namespace at::native
#endif
C10_DIAGNOSTIC_POP()

View File

@ -10,8 +10,7 @@
#include <mkl.h>
#endif
namespace at {
namespace native {
namespace at::native {
void mkl_gemm_batched(
TransposeType trans_A, TransposeType trans_B,
@ -48,4 +47,4 @@ void mkl_gemm_f16f16f32(
int M, int N, int K, const float alpha,
const c10::Half* A, int lda, const c10::Half* B, int ldb,
const float beta, float* C, int ldc);
}} // namespace at::native
} // namespace at::native

View File

@ -24,11 +24,7 @@
#include <ATen/ops/sparse_coo_tensor.h>
#endif
namespace at {
namespace native {
namespace sparse {
namespace impl {
namespace mkl {
namespace at::native::sparse::impl::mkl {
namespace {
@ -713,8 +709,4 @@ void triangular_solve_out_sparse_csr(
#endif
}
} // namespace mkl
} // namespace impl
} // namespace sparse
} // namespace native
} // namespace at
} // namespace at::native::sparse::impl::mkl

View File

@ -2,11 +2,7 @@
#include <ATen/Tensor.h>
namespace at {
namespace native {
namespace sparse {
namespace impl {
namespace mkl {
namespace at::native::sparse::impl::mkl {
void addmm_out_sparse_csr(
const Tensor& mat1,
@ -36,8 +32,4 @@ void triangular_solve_out_sparse_csr(
bool transpose,
bool unitriangular);
} // namespace mkl
} // namespace impl
} // namespace sparse
} // namespace native
} // namespace at
} // namespace at::native::sparse::impl::mkl

View File

@ -40,8 +40,7 @@ Tensor& _sparse_mm_mkl_(
#include <ATen/ExpandUtils.h>
#include <ATen/SparseCsrTensorImpl.h>
namespace at {
namespace sparse_csr {
namespace at::sparse_csr {
#ifdef MKL_ILP64
static constexpr ScalarType TORCH_INT_TYPE = at::kLong;
@ -257,7 +256,6 @@ Tensor& _sparse_mm_mkl_(
return self;
}
} // namespace native
} // namespace at
} // namespace at::sparse_csr
#endif // AT_MKL_ENABLED

View File

@ -2,8 +2,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/SparseCsrTensorUtils.h>
namespace at {
namespace sparse_csr {
namespace at::sparse_csr {
Tensor& _sparse_mm_mkl_(
Tensor& self,
const SparseCsrTensor& sparse_,
@ -11,5 +10,4 @@ Tensor& _sparse_mm_mkl_(
const Tensor& t,
const Scalar& alpha,
const Scalar& beta);
} // namespace native
} // namespace at
} // namespace at::sparse_csr

View File

@ -21,7 +21,7 @@
#include <ATen/Parallel.h>
#include <ATen/TensorIterator.h>
namespace at { namespace native {
namespace at::native {
// In real-to-complex transform, MKL FFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following structs are used to fill in the other half with symmetry in
@ -200,7 +200,7 @@ Tensor& _fft_c2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalizat
return out.copy_(result);
}
}} // namespace at::native
} // namespace at::native
#endif /* AT_MKL_ENABLED() || AT_POCKETFFT_ENABLED() */
#if AT_POCKETFFT_ENABLED()
@ -342,7 +342,7 @@ Tensor _fft_c2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization,
#include <ATen/mkl/Limits.h>
namespace at { namespace native {
namespace at::native {
// Constructs an mkl-fft plan descriptor representing the desired transform
// For complex types, strides are in units of 2 * element_size(dtype)
@ -570,7 +570,7 @@ Tensor _fft_c2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization,
return _exec_fft(out, self, self.sizes(), sorted_dims, normalization, forward);
}
}} // namespace at::native
} // namespace at::native
#else

View File

@ -52,8 +52,7 @@ Tensor& mkldnn_mul_(Tensor& self, const Tensor& other) {
#include <ATen/native/mkldnn/MKLDNNCommon.h>
namespace at {
namespace native {
namespace at::native {
static Tensor emptyBinaryOp(const Tensor& self, const Tensor& other) {
if (!self.requires_grad() && !other.requires_grad()) {
@ -155,7 +154,6 @@ Tensor& mkldnn_mul_(Tensor& self, const Tensor& other) {
return native::mkldnn_mul_out(self, other, self);
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -6,10 +6,9 @@
#if AT_MKLDNN_ENABLED()
#include <ideep/tensor.hpp>
#include <utility>
namespace at {
namespace native {
namespace mkldnn {
namespace at::native::mkldnn {
struct ContextConv final {
ideep::tensor weight_packed_;
@ -32,15 +31,13 @@ struct ContextConv final {
ideep::attr_t attr)
: weight_packed_(std::move(weight_packed)),
at_bias_(std::move(at_bias)),
padding_(padding),
stride_(stride),
dilation_(dilation),
padding_(std::move(padding)),
stride_(std::move(stride)),
dilation_(std::move(dilation)),
groups_(groups),
attr_(attr) {}
};
} // namespace mkldnn
} // namespace native
} // namespace at
} // namespace at::native::mkldnn
#endif // AT_MKLDNN_ENABLED()
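
Besides collapsing the namespaces, the ContextConv hunk above moves the by-value std::vector parameters into the members instead of copying them. A reduced sketch of the pass-by-value-then-move idiom (names hypothetical):

#include <cstdint>
#include <utility>
#include <vector>

struct Context {
  std::vector<std::int64_t> padding_;
  std::vector<std::int64_t> stride_;

  // Taking by value and moving means an rvalue argument costs zero copies
  // and an lvalue argument costs exactly one, for each parameter.
  Context(std::vector<std::int64_t> padding, std::vector<std::int64_t> stride)
      : padding_(std::move(padding)), stride_(std::move(stride)) {}
};

// Usage: Context ctx({1, 1}, {2, 2}); // the temporaries are moved, not copied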

View File

@ -41,7 +41,7 @@ REGISTER_NO_CPU_DISPATCH(mkldnn_convolution_transpose_backward_stub)
#include <ATen/native/ConvUtils.h>
#include <c10/util/irange.h>
namespace at { namespace native {
namespace at::native {
// follow check rules from native/Convolution.cpp without transpose supported
static void check_shape_forward(const Tensor& input,
@ -1080,6 +1080,6 @@ TORCH_LIBRARY_IMPL(mkldnn, Meta, m) {
TORCH_SELECTIVE_NAME("mkldnn::_convolution_transpose_pointwise"),
TORCH_FN(mkldnn_convolution_transpose_pointwise_meta));
}
}} // namespace at::native
} // namespace at::native
#endif

View File

@ -11,11 +11,7 @@
#if AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace mkldnn {
namespace internal {
namespace convolution {
namespace at::native::mkldnn::internal::convolution {
c10::intrusive_ptr<mkldnn::ConvOpContext> createConvPrePackOpContext(
Tensor weight,
@ -280,10 +276,6 @@ Tensor conv_run(
return op_context->run(input);
}
} // namespace convolution
} // namespace internal
} // namespace mkldnn
} // namespace native
} // namespace at
} // namespace at::native::mkldnn::internal::convolution
#endif // AT_MKLDNN_ENABLED()

View File

@ -6,11 +6,7 @@
#if AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace mkldnn {
namespace internal {
namespace convolution {
namespace at::native::mkldnn::internal::convolution {
c10::intrusive_ptr<mkldnn::ConvOpContext> createConvPrePackOpContext(
Tensor weight,
@ -40,10 +36,6 @@ Tensor run(ContextConv& context, const Tensor& input);
void run(ContextConv& context, const Tensor& input, void* output);
} // namespace convolution
} // namespace internal
} // namespace mkldnn
} // namespace native
} // namespace at
} // namespace at::native::mkldnn::internal::convolution
#endif // AT_MKLDNN_ENABLED()

View File

@ -24,8 +24,7 @@ Tensor& copy_mkldnn_(Tensor& self, const Tensor& src, bool non_blocking) {
#include <ATen/native/mkldnn/MKLDNNCommon.h>
namespace at {
namespace native {
namespace at::native {
Tensor& copy_mkldnn_(Tensor& self, const Tensor& src, bool non_blocking) {
TORCH_CHECK(
@ -43,7 +42,6 @@ Tensor& copy_mkldnn_(Tensor& self, const Tensor& src, bool non_blocking) {
return self;
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -29,7 +29,7 @@ Tensor mkldnn_gelu_backward(const Tensor& grad_output, const Tensor& input, c10:
#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>
namespace at { namespace native {
namespace at::native {
Tensor mkldnn_gelu(const Tensor& input, c10::string_view approximate) {
if (input.scalar_type() == ScalarType::BFloat16) {
@ -59,6 +59,6 @@ Tensor mkldnn_gelu_backward(const Tensor& grad_output, const Tensor& input, c10:
grad_output.options().device_opt());
}
}}
}
#endif // AT_MKLDNN_ENABLED

View File

@ -54,8 +54,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_linear_backward(
#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>
namespace at {
namespace native {
namespace at::native {
Tensor mkldnn_linear(
const Tensor& self,
@ -445,7 +444,6 @@ TORCH_LIBRARY_IMPL(mkldnn, MkldnnCPU, m) {
TORCH_FN(mkldnn_linear_pointwise_binary));
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -5,8 +5,7 @@
#if AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace at::native {
C10_API Tensor mkldnn_linear_pointwise(
const Tensor& input_t,
const Tensor& weight_t,
@ -33,7 +32,6 @@ C10_API Tensor mkl_linear(
#endif// AT_MKL_ENABLED
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED()

View File

@ -7,7 +7,7 @@
#include <ideep.hpp>
namespace at { namespace native {
namespace at::native {
/**
* `IntrusivePtrTargetWrapper` wraps a custom storage handle of a tensor
@ -209,6 +209,6 @@ TORCH_LIBRARY_IMPL(mkldnn, MkldnnCPU, m) {
TORCH_FN(nbytes_from_mkldnn));
}
}}
}
#endif // AT_MKLDNN_ENABLED()

View File

@ -22,7 +22,7 @@
#endif
namespace at { namespace native {
namespace at::native {
#if AT_MKLDNN_ENABLED()
@ -637,4 +637,4 @@ TORCH_LIBRARY_IMPL(mkl, CPU, m) {
#endif // AT_MKL_ENABLED && AT_MKLDNN_ENABLED
}}
}

View File

@ -93,8 +93,7 @@ void mkldnn_matmul_i8i8i32(
#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>
namespace at {
namespace native {
namespace at::native {
static bool use_mkldnn_bf16_matmul() {
return at::globalContext().userEnabledMkldnn() && mkldnn_bf16_device_check();
@ -513,7 +512,6 @@ void mkldnn_matmul_i8i8i32(
}
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -4,7 +4,7 @@
#include <ATen/Config.h>
#include <ATen/native/LinearAlgebraUtils.h> // For TransposeType
namespace at { namespace native {
namespace at::native {
// result = beta * result + alpha * gemm(mat1, mat2)
TORCH_API void mkldnn_matmul(
@ -74,5 +74,3 @@ TORCH_API void mkldnn_matmul_i8i8i32(
const Tensor &result);
}
}
} // namespace at::native

View File

@ -85,8 +85,7 @@ std::tuple<Tensor, Tensor, Tensor> _new_batch_norm_backward_mkldnn(
#include <ATen/native/layer_norm.h>
#include <ideep/abstract_types.hpp>
namespace at {
namespace native {
namespace at::native {
std::tuple<Tensor, Tensor, Tensor> mkldnn_layer_norm_last_index_weight_bias_f32(
const Tensor& input,
@ -279,7 +278,6 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm_backward(const Tensor& grad
weight.options().device_opt())));
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -3,9 +3,7 @@
#if AT_MKLDNN_ENABLED()
#include <ATen/native/mkldnn/ConvPrepack.h>
namespace at {
namespace native {
namespace mkldnn {
namespace at::native::mkldnn {
c10::intrusive_ptr<ConvOpContext> MkldnnConvOpContext::create_context(
at::Tensor&& weight,
@ -40,8 +38,6 @@ void MkldnnConvOpContext::run(const Tensor& input, void* output) {
mkldnn::internal::convolution::run(op_context_, input, output);
}
} // namespace mkldnn
} // namespace native
} // namespace at
} // namespace at::native::mkldnn
#endif // AT_MKLDNN_ENABLED()

View File

@ -6,9 +6,7 @@
#if AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace mkldnn {
namespace at::native::mkldnn {
const static std::map<std::string, ideep::attr_t> fusion_attr_map = {
{"none", ideep::attr_t()},
@ -92,8 +90,6 @@ class MkldnnConvOpContext final : public ConvOpContext {
const ideep::attr_t& attr);
};
} // namespace mkldnn
} // namespace native
} // namespace at
} // namespace at::native::mkldnn
#endif // AT_MKLDNN_ENABLED()

View File

@ -195,8 +195,7 @@ Tensor mkldnn_adaptive_avg_pool2d_backward(
#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>
namespace at {
namespace native {
namespace at::native {
static Tensor _mkldnn_pooling(
const Tensor& input,
@ -667,7 +666,6 @@ Tensor mkldnn_adaptive_avg_pool2d_backward(
/*algo*/ ideep::algorithm::pooling_avg_exclude_padding);
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -22,7 +22,7 @@ std::tuple<Tensor, Tensor> mkldnn_prelu_backward(const Tensor& grad_output, cons
#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>
namespace at { namespace native {
namespace at::native {
Tensor mkldnn_prelu(const Tensor& input, const Tensor& weight) {
if (input.scalar_type() == ScalarType::BFloat16) {
@ -67,6 +67,6 @@ std::tuple<Tensor, Tensor> mkldnn_prelu_backward(const Tensor& grad_output, cons
weight.options().device_opt())));
}
}
}}
}
#endif // AT_MKLDNN_ENABLED

View File

@ -9,9 +9,7 @@
#include <torch/custom_class.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace mkldnn {
namespace at::native::mkldnn {
using namespace internal::convolution;
@ -96,17 +94,13 @@ TORCH_LIBRARY_IMPL(mkldnn_prepacked, CPU, m) {
TORCH_SELECTIVE_NAME("mkldnn_prepacked::conv2d_run"), TORCH_FN(conv_run));
}
} // namespace mkldnn
} // namespace native
} // namespace at
} // namespace at::native::mkldnn
#endif // AT_MKLDNN_ENABLED()
#if AT_MKL_ENABLED() && AT_MKLDNN_ENABLED()
namespace at {
namespace native {
namespace mkl {
namespace at::native::mkl {
TORCH_LIBRARY(mkl, m) {
m.def(TORCH_SELECTIVE_SCHEMA(
@ -115,8 +109,6 @@ TORCH_LIBRARY(mkl, m) {
"mkl::_mkl_linear(Tensor X, Tensor MKL_W, Tensor ORI_W, Tensor? B, int batch_size) -> Tensor"));
}
} // namespace mkl
} // namespace native
} // namespace at
} // namespace at::native::mkl
#endif // AT_MKL_ENABLED && AT_MKLDNN_ENABLED

View File

@ -32,7 +32,7 @@ Tensor mkldnn_relu_backward(const Tensor& grad_output, const Tensor& input, cons
#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>
namespace at { namespace native {
namespace at::native {
Tensor mkldnn_relu(const Tensor& input) {
if (input.scalar_type() == ScalarType::BFloat16) {
@ -71,6 +71,6 @@ Tensor mkldnn_relu_backward(const Tensor& grad_output, const Tensor& input, cons
grad_output.options().device_opt());
}
}}
}
#endif // AT_MKLDNN_ENABLED

View File

@ -27,8 +27,7 @@ Tensor mkldnn_softmax(
#include <ATen/native/mkldnn/MKLDNNCommon.h>
namespace at {
namespace native {
namespace at::native {
Tensor mkldnn_softmax(
const Tensor& self,
@ -45,7 +44,6 @@ Tensor mkldnn_softmax(
self.options().device_opt());
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -8,7 +8,7 @@
#include <ATen/ops/empty_native.h>
#endif
namespace at { namespace native {
namespace at::native {
#if AT_MKLDNN_ENABLED()
@ -32,4 +32,4 @@ Tensor empty_mkldnn(IntArrayRef sizes, std::optional<ScalarType> dtype, std::opt
#endif // AT_MKLDNN_ENABLED()
}}
}

View File

@ -45,8 +45,7 @@ Tensor& mkldnn_transpose_(Tensor& self, int64_t dim0, int64_t dim1) {
#include <ATen/native/mkldnn/MKLDNNCommon.h>
namespace at {
namespace native {
namespace at::native {
Tensor mkldnn_view(const Tensor& self, IntArrayRef size) {
TORCH_CHECK(false,
@ -95,7 +94,6 @@ Tensor& mkldnn_transpose_(Tensor& self, int64_t dim0, int64_t dim1) {
TORCH_CHECK(false, "mkldnn_transpose_: in-place mkldnn operations are not supported yet");
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -3,8 +3,7 @@
#include <ATen/ATen.h>
#include <c10/core/SymIntArrayRef.h>
namespace at {
namespace native {
namespace at::native {
Tensor mkldnn_view(const Tensor& self, IntArrayRef size);
@ -12,5 +11,4 @@ Tensor mkldnn_view_symint(const Tensor& self, c10::SymIntArrayRef size);
Tensor mkldnn_clone(const Tensor& self);
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -37,8 +37,7 @@ Tensor& mkldnn_tanh_(Tensor& self) {
#include <ATen/native/mkldnn/MKLDNNCommon.h>
namespace at {
namespace native {
namespace at::native {
Tensor mkldnn_sigmoid(const Tensor& self) {
ideep::tensor& x = itensor_from_mkldnn(self);
@ -72,7 +71,6 @@ Tensor& mkldnn_tanh_(Tensor& self) {
return self;
}
} // namespace native
} // namespace at
} // namespace at::native
#endif // AT_MKLDNN_ENABLED

View File

@ -3,7 +3,7 @@
#include <ATen/native/Pool.h>
#include <c10/util/irange.h>
namespace at { namespace native {
namespace at::native {
std::vector<int64_t> pool_output_sizes(
IntArrayRef input_size,
@ -174,4 +174,4 @@ const std::map<c10::string_view, ideep::algorithm>& fusion_binary_alg_map() {
}
#endif // AT_MKLDNN_ENABLED()
}}
}

View File

@ -5,8 +5,7 @@
#include <ATen/native/DispatchStub.h>
#include <ATen/native/quantized/AffineQuantizerBase.h>
namespace at {
namespace native {
namespace at::native {
TORCH_API Tensor& quantize_tensor_per_tensor_affine(
const Tensor& rtensor,
@ -126,5 +125,4 @@ TORCH_API Tensor dequantize_tensor(
double scale,
int64_t zero_point);
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -2,8 +2,7 @@
#include <c10/macros/Export.h>
#include <c10/core/ScalarType.h>
namespace at {
namespace native {
namespace at::native {
// Quantize a float value into a uint value given scale and zero_point
template <typename T>
@ -43,5 +42,4 @@ requantize_from_int(double multiplier, int64_t zero_point, int64_t src);
int quantize_val_float_qparams(float scale, float zero_point, float value, int qmin, int qmax);
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -2,9 +2,7 @@
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace at::native {
Tensor& quantized_copy_from_float_(Tensor& self, const Tensor& src);
}
} // namespace at
} // namespace at::native

View File

@ -2,8 +2,7 @@
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at {
namespace native {
namespace at::native {
using masked_fill_kernel_quantized_fn = void(*)(TensorIterator& iter, const Scalar& value, double scale, int zero_point);
using index_put_kernel_quantized_fn = void(*)(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, int zero_point);
@ -11,5 +10,4 @@ DECLARE_DISPATCH(masked_fill_kernel_quantized_fn, masked_fill_kernel_quantized_s
DECLARE_DISPATCH(index_put_kernel_quantized_fn, index_put_kernel_quantized_stub)
} // native
} // at
} // namespace at::native

View File

@ -46,7 +46,7 @@ using DeconvDesc = dnnl::deconvolution_forward::primitive_desc;
using DeconvParams = ideep::deconv_forward_params;
struct LinearPrimitiveCache : PrimitiveCache {
LinearPrimitiveCache() {}
LinearPrimitiveCache() = default;
LinearPrimitiveCache(
const PrimitiveCacheKey& key,
@ -74,7 +74,7 @@ struct LinearPrimitiveCache : PrimitiveCache {
};
struct ConvPrimitiveCache : PrimitiveCache {
ConvPrimitiveCache() {}
ConvPrimitiveCache() = default;
ConvPrimitiveCache(
const PrimitiveCacheKey& key,
@ -91,7 +91,7 @@ struct ConvPrimitiveCache : PrimitiveCache {
};
struct DeconvPrimitiveCache : PrimitiveCache {
DeconvPrimitiveCache() {}
DeconvPrimitiveCache() = default;
DeconvPrimitiveCache(
const PrimitiveCacheKey& key,

View File

@ -5,8 +5,7 @@
#include <ATen/native/transformers/attention.h>
#include <optional>
namespace at {
namespace native {
namespace at::native {
using fused_sdp_choice_fn = int64_t (*)(const Tensor& query_, const Tensor& key, const Tensor& value,
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, std::optional<double> scale, bool enable_gqa);
@ -68,5 +67,4 @@ using flash_attention_backward_fn = void (*)(
DECLARE_DISPATCH(flash_attention_fn, flash_attention_kernel)
DECLARE_DISPATCH(flash_attention_backward_fn, flash_attention_backward_kernel)
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -18,9 +18,7 @@
#include <ATen/ops/layer_norm.h>
#endif
namespace at {
namespace native {
namespace at::native {
namespace {
Tensor linear_for_ffn(
@ -147,5 +145,4 @@ Tensor transformer_encoder_layer_forward(
return x;
}
} // namespace native
} // namespace at
} // namespace at::native

View File

@ -4,9 +4,7 @@
#include <c10/core/CPUAllocator.h>
#include <c10/util/accumulate.h>
namespace at {
namespace native {
namespace mobile {
namespace at::native::mobile {
Tensor empty_with_tail_padding(
const IntArrayRef size,
@ -61,6 +59,4 @@ Tensor allocate_padded_contiguous_if_needed(
return padded_input.copy_(input);
}
} // namespace mobile
} // namespace native
} // namespace at
} // namespace at::native::mobile

View File

@ -2,9 +2,7 @@
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace mobile {
namespace at::native::mobile {
Tensor allocate_padded_contiguous_if_needed(
const Tensor& input,
@ -19,6 +17,4 @@ at::Tensor empty_with_tail_padding(
c10::MemoryFormat memory_format,
std::optional<DimnameList> maybe_names);
} // namespace mobile
} // namespace native
} // namespace at
} // namespace at::native::mobile

View File

@ -1,8 +1,7 @@
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/macros/Macros.h>
namespace at {
namespace detail {
namespace at::detail {
namespace {
@ -79,5 +78,4 @@ struct VulkanGuardImpl final : public c10::impl::DeviceGuardImplInterface {
C10_REGISTER_GUARD_IMPL(Vulkan, VulkanGuardImpl)
} // namespace detail
} // namespace at
} // namespace at::detail

View File

@ -11,8 +11,7 @@
#include <c10/util/irange.h>
namespace at::native::xnnpack {
namespace internal {
namespace convolution2d {
namespace internal::convolution2d {
namespace {
@ -464,7 +463,6 @@ Tensor conv2d_transpose_clamp_run(
return op_context->run(input);
}
} // namespace convolution
} // namespace internal
} // namespace internal::convolution
bool use_convolution2d(

View File

@ -7,9 +7,7 @@
#include <ATen/nnapi/nnapi_model_loader.h>
#include <c10/util/irange.h>
namespace torch {
namespace nnapi {
namespace bind {
namespace torch::nnapi::bind {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
nnapi_wrapper* nnapi;
@ -208,6 +206,4 @@ void NnapiCompilation::get_operand_type(const at::Tensor& t, ANeuralNetworksOper
CAFFE_THROW("Bad dtype: " + std::to_string(static_cast<int8_t>(t.scalar_type())));
}
} // namespace bind
} // namespace nnapi
} // namespace torch
} // namespace torch::nnapi::bind

View File

@ -8,9 +8,7 @@
#include <ATen/nnapi/nnapi_wrapper.h>
namespace torch {
namespace nnapi {
namespace bind {
namespace torch::nnapi::bind {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TORCH_API extern nnapi_wrapper* nnapi;
@ -59,8 +57,6 @@ struct NnapiCompilation : torch::jit::CustomClassHolder {
int32_t num_outputs_ {};
};
} // namespace bind
} // namespace nnapi
} // namespace torch
} // namespace torch::nnapi::bind
#endif // NNAPI_BIND_H_

View File

@ -21,8 +21,7 @@
#define NNAPI_CHECK(res) CAFFE_ENFORCE(res == ANEURALNETWORKS_NO_ERROR, "NNAPI returned error: ", res)
namespace caffe2 {
namespace nnapi {
namespace caffe2::nnapi {
namespace {
@ -264,4 +263,4 @@ int load_nnapi_model(
return 0;
}
}} // namespace caffe2::nnapi
} // namespace caffe2::nnapi

View File

@ -1,13 +1,12 @@
#ifndef NNAPI_MODEL_LOADER_H_
#define NNAPI_MODEL_LOADER_H_
#include <stdint.h>
#include <cstdint>
#include <ATen/nnapi/NeuralNetworks.h>
#include <ATen/nnapi/nnapi_wrapper.h>
namespace caffe2 {
namespace nnapi {
namespace caffe2::nnapi {
int load_nnapi_model(
struct nnapi_wrapper* nnapi,
@ -24,6 +23,6 @@ int load_nnapi_model(
int32_t* out_output_count,
size_t* out_bytes_consumed);
}} // namespace caffe2::nnapi
} // namespace caffe2::nnapi
#endif // NNAPI_MODEL_LOADER_H_

View File

@ -17,8 +17,8 @@
// This file is generated by nnapi/codegen.py
#ifndef NNAPI_WRAPPER_H_
#define NNAPI_WRAPPER_H_
#include <stddef.h>
#include <stdint.h>
#include <cstddef>
#include <cstdint>
#include <ATen/nnapi/NeuralNetworks.h>
struct nnapi_wrapper {
int(*_getDeviceCount)(uint32_t* numDevices);