diff --git a/torch/csrc/jit/backends/backend.h b/torch/csrc/jit/backends/backend.h index 5aae642fa55..db0205d395d 100644 --- a/torch/csrc/jit/backends/backend.h +++ b/torch/csrc/jit/backends/backend.h @@ -5,8 +5,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { // NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) inline c10::FunctionSchema getIsAvailableSchema() { @@ -115,5 +114,4 @@ class backend { } }; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_debug_handler.cpp b/torch/csrc/jit/backends/backend_debug_handler.cpp index 13c9778c67c..6c2ba467bc6 100644 --- a/torch/csrc/jit/backends/backend_debug_handler.cpp +++ b/torch/csrc/jit/backends/backend_debug_handler.cpp @@ -2,8 +2,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { std::atomic BackendDebugInfoRecorder::unique_debug_handle_{0}; @@ -33,5 +32,4 @@ BackendDebugInfoMapType BackendDebugInfoRecorder::stopRecording() { return handles_to_inlined_callstack_ptrs_; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_debug_handler.h b/torch/csrc/jit/backends/backend_debug_handler.h index d4b00fe340f..4128832e7a0 100644 --- a/torch/csrc/jit/backends/backend_debug_handler.h +++ b/torch/csrc/jit/backends/backend_debug_handler.h @@ -7,8 +7,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { /* * BackendDebugHandleManager is responsible for issuing debug handles to @@ -136,5 +135,4 @@ class TORCH_API BackendDebugInfoRecorder { BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_; }; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_debug_info.cpp b/torch/csrc/jit/backends/backend_debug_info.cpp index 5f6fbb6d3f3..c6fdac06467 100644 --- a/torch/csrc/jit/backends/backend_debug_info.cpp +++ b/torch/csrc/jit/backends/backend_debug_info.cpp @@ -1,9 +1,7 @@ #include #include -namespace torch { -namespace jit { -namespace backend { +namespace torch::jit::backend { namespace { #ifdef BUILD_LITE_INTERPRETER static auto cls = torch::class_( @@ -18,6 +16,4 @@ static auto cls = torch::class_( #endif } // namespace -} // namespace backend -} // namespace jit -} // namespace torch +} // namespace torch::jit::backend diff --git a/torch/csrc/jit/backends/backend_debug_info.h b/torch/csrc/jit/backends/backend_debug_info.h index 291eb48132e..d6740b6c504 100644 --- a/torch/csrc/jit/backends/backend_debug_info.h +++ b/torch/csrc/jit/backends/backend_debug_info.h @@ -5,8 +5,7 @@ #endif #include -namespace torch { -namespace jit { +namespace torch::jit { constexpr static auto kBackendUtilsNamespace = "backendutils"; constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo"; @@ -61,5 +60,4 @@ class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder { PyTorchBackendDebugInfoDummy() = default; }; #endif -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_detail.h b/torch/csrc/jit/backends/backend_detail.h index 7299ce259bc..e69a93ebb14 100644 --- a/torch/csrc/jit/backends/backend_detail.h +++ b/torch/csrc/jit/backends/backend_detail.h @@ -6,8 +6,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { using DebugHandleType = int64_t; @@ -37,5 +36,4 @@ TORCH_API Module codegen_backend_module( const c10::Dict& method_compile_spec, const c10::DictTypePtr& any_dict_ty); } // 
namespace detail -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_init.cpp b/torch/csrc/jit/backends/backend_init.cpp index 308857123d2..380c9f0d096 100644 --- a/torch/csrc/jit/backends/backend_init.cpp +++ b/torch/csrc/jit/backends/backend_init.cpp @@ -7,8 +7,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { // Get all types that are shared in the module hierarchy rooted at \p mod. std::unordered_set getSharedModuleTypes(Module& mod) { @@ -189,5 +188,4 @@ void initJitBackendBindings(PyObject* module) { "Object ", py::str(orig_module), " is not a ScriptModule")); }); } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_init.h b/torch/csrc/jit/backends/backend_init.h index e7be08c7659..7f2aac18bd0 100644 --- a/torch/csrc/jit/backends/backend_init.h +++ b/torch/csrc/jit/backends/backend_init.h @@ -3,9 +3,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { // Initialize Python bindings for JIT to_ functions. void initJitBackendBindings(PyObject* module); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_interface.cpp b/torch/csrc/jit/backends/backend_interface.cpp index 661a9ac78b4..a124b8adf92 100644 --- a/torch/csrc/jit/backends/backend_interface.cpp +++ b/torch/csrc/jit/backends/backend_interface.cpp @@ -1,10 +1,8 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { PyTorchBackendInterface::PyTorchBackendInterface() noexcept = default; PyTorchBackendInterface::~PyTorchBackendInterface() = default; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_interface.h b/torch/csrc/jit/backends/backend_interface.h index 099575da528..331497f929d 100644 --- a/torch/csrc/jit/backends/backend_interface.h +++ b/torch/csrc/jit/backends/backend_interface.h @@ -2,8 +2,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { // Interface for a JIT backend. class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder { @@ -30,5 +29,4 @@ class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder { c10::IValue handle, c10::impl::GenericList inputs) = 0; }; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_preprocess.h b/torch/csrc/jit/backends/backend_preprocess.h index 0a256134aa9..da4ebd5a937 100644 --- a/torch/csrc/jit/backends/backend_preprocess.h +++ b/torch/csrc/jit/backends/backend_preprocess.h @@ -1,8 +1,7 @@ #pragma once #include -namespace torch { -namespace jit { +namespace torch::jit { class backend_preprocess_register { std::string backend_name_; @@ -14,5 +13,4 @@ class backend_preprocess_register { detail::registerBackendPreprocessFunction(name, preprocess); } }; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_resolver.cpp b/torch/csrc/jit/backends/backend_resolver.cpp index d6041a25591..9c113550f9a 100644 --- a/torch/csrc/jit/backends/backend_resolver.cpp +++ b/torch/csrc/jit/backends/backend_resolver.cpp @@ -2,8 +2,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { // Essentially ClassNamespaceValue from import_source.cpp without the // SourceImporterImpl reference. 
This helps resolve the @@ -67,5 +66,4 @@ std::shared_ptr loweredModuleResolver() { return resolver; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/backend_resolver.h b/torch/csrc/jit/backends/backend_resolver.h index b0d5727d9d9..9dd44837257 100644 --- a/torch/csrc/jit/backends/backend_resolver.h +++ b/torch/csrc/jit/backends/backend_resolver.h @@ -2,9 +2,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { // Create a Resolver for use in generating LoweredModules for specific backends. TORCH_API std::shared_ptr loweredModuleResolver(); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/backends/coreml/objc/PTMCoreMLTensorSpec.h b/torch/csrc/jit/backends/coreml/objc/PTMCoreMLTensorSpec.h index 51462972304..5aca1e51dd0 100644 --- a/torch/csrc/jit/backends/coreml/objc/PTMCoreMLTensorSpec.h +++ b/torch/csrc/jit/backends/coreml/objc/PTMCoreMLTensorSpec.h @@ -3,10 +3,7 @@ #include -namespace torch { -namespace jit { -namespace mobile { -namespace coreml { +namespace torch::jit::mobile::coreml { struct TensorSpec { std::string name = ""; @@ -26,7 +23,4 @@ static inline c10::ScalarType scalar_type(const std::string& type_string) { return c10::ScalarType::Undefined; } -} // namespace coreml -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::coreml diff --git a/torch/csrc/jit/backends/xnnpack/executor/xnn_executor.h b/torch/csrc/jit/backends/xnnpack/executor/xnn_executor.h index 33542d69c80..118af11d031 100644 --- a/torch/csrc/jit/backends/xnnpack/executor/xnn_executor.h +++ b/torch/csrc/jit/backends/xnnpack/executor/xnn_executor.h @@ -8,10 +8,7 @@ #include #include -namespace torch { -namespace jit { -namespace xnnpack { -namespace delegate { +namespace torch::jit::xnnpack::delegate { class XNNExecutor { private: @@ -68,7 +65,4 @@ class XNNExecutor { friend class XNNCompiler; }; -} // namespace delegate -} // namespace xnnpack -} // namespace jit -} // namespace torch +} // namespace torch::jit::xnnpack::delegate diff --git a/torch/csrc/jit/codegen/cuda/interface.cpp b/torch/csrc/jit/codegen/cuda/interface.cpp index d3e60781605..d91f3302d0a 100644 --- a/torch/csrc/jit/codegen/cuda/interface.cpp +++ b/torch/csrc/jit/codegen/cuda/interface.cpp @@ -9,10 +9,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace cuda { +namespace torch::jit::fuser::cuda { static std::atomic cuda_fusion_guard_mode{true}; @@ -131,7 +128,4 @@ bool skipNode(const std::string& symbol_str, bool flip) { getFuserInterface()->fn_skip_n(symbol_str, flip); } -} // namespace cuda -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::cuda diff --git a/torch/csrc/jit/codegen/cuda/interface.h b/torch/csrc/jit/codegen/cuda/interface.h index 0ccdfe2c9eb..926e4cb5d26 100644 --- a/torch/csrc/jit/codegen/cuda/interface.h +++ b/torch/csrc/jit/codegen/cuda/interface.h @@ -13,10 +13,7 @@ * Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp */ -namespace torch { -namespace jit { -namespace fuser { -namespace cuda { +namespace torch::jit::fuser::cuda { TORCH_API std::atomic& getCudaFusionGuardMode(); @@ -52,7 +49,4 @@ TORCH_API bool isEnabled(); TORCH_API bool setEnabled(bool is_enabled); TORCH_API bool canBeEnabled(); -} // namespace cuda -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::cuda diff --git 
a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.h b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.h index 2e6d5959632..72a94518b92 100644 --- a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.h +++ b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.h @@ -13,10 +13,7 @@ namespace at { struct DynamicLibrary; } -namespace torch { -namespace jit { -namespace fuser { -namespace cpu { +namespace torch::jit::fuser::cpu { // Represents a compiled CPU kernel and the metadata necessary to run it struct TORCH_API FusedKernelCPU : public FusedKernel { @@ -43,7 +40,4 @@ struct TORCH_API FusedKernelCPU : public FusedKernel { void (*kernel)(uint32_t, void**) = nullptr; }; -} // namespace cpu -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::cpu diff --git a/torch/csrc/jit/codegen/fuser/cpu/resource_strings.h b/torch/csrc/jit/codegen/fuser/cpu/resource_strings.h index 6d8bea228cf..134451f335f 100644 --- a/torch/csrc/jit/codegen/fuser/cpu/resource_strings.h +++ b/torch/csrc/jit/codegen/fuser/cpu/resource_strings.h @@ -2,10 +2,7 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace cpu { +namespace torch::jit::fuser::cpu { /*with type_as not checking type of its input, a fusion group can have non-fp32 tensor as input. Correct code for this case is generated, however, nvrtc does @@ -101,7 +98,4 @@ JIT_API void ${kernelName}(IndexType totalElements, void ** args) { } )"); -} // namespace cpu -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::cpu diff --git a/torch/csrc/jit/codegen/fuser/cpu/temp_file.h b/torch/csrc/jit/codegen/fuser/cpu/temp_file.h index 9fb53bc962c..fdb0788d0a5 100644 --- a/torch/csrc/jit/codegen/fuser/cpu/temp_file.h +++ b/torch/csrc/jit/codegen/fuser/cpu/temp_file.h @@ -22,10 +22,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace cpu { +namespace torch::jit::fuser::cpu { #ifdef _MSC_VER int wmkstemps(wchar_t* tmpl, int suffix_len) { @@ -135,7 +132,4 @@ struct TempFile { std::string name_; }; -} // namespace cpu -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::cpu diff --git a/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp b/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp index 67ed298ca74..d07e1fd2309 100644 --- a/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp +++ b/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp @@ -4,10 +4,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { // Non-default dnnl::graph::allocator needs an allocator. // We would let it use c10::GetCPUAllocator's allocator, @@ -152,9 +149,6 @@ at::ScalarType LlgaTensorDesc::aten_scalar_type() const { } } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn #endif // AT_MKLDNN_ENABLED() diff --git a/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h b/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h index 64eed4ff481..d869a46e559 100644 --- a/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h +++ b/torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h @@ -6,10 +6,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { // Engine represents a device and its context. 
From the device kind, the engine // knows how to generate code for the target device and what kind of device @@ -270,7 +267,4 @@ at::Tensor empty_llga( dnnl::graph::tensor llga_from_aten_tensor(const at::Tensor& tensor); -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/decompose_silu.cpp b/torch/csrc/jit/codegen/onednn/decompose_silu.cpp index 4d6807500cd..8a9e36c2973 100644 --- a/torch/csrc/jit/codegen/onednn/decompose_silu.cpp +++ b/torch/csrc/jit/codegen/onednn/decompose_silu.cpp @@ -5,10 +5,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { static bool shouldDecomposeSilu(Node* node) { if (node->kind() != aten::silu) { @@ -59,7 +56,4 @@ void DecomposeSiluForLLGA(std::shared_ptr& graph) { EliminateDeadCode(graph); } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/decompose_silu.h b/torch/csrc/jit/codegen/onednn/decompose_silu.h index 9d9a51502c8..fc4f115f1bd 100644 --- a/torch/csrc/jit/codegen/onednn/decompose_silu.h +++ b/torch/csrc/jit/codegen/onednn/decompose_silu.h @@ -2,14 +2,8 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { void DecomposeSiluForLLGA(std::shared_ptr& graph); -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/defer_size_check.cpp b/torch/csrc/jit/codegen/onednn/defer_size_check.cpp index 4d0f12564bd..ce76a3b3b76 100644 --- a/torch/csrc/jit/codegen/onednn/defer_size_check.cpp +++ b/torch/csrc/jit/codegen/onednn/defer_size_check.cpp @@ -2,10 +2,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { class SizeCheckMover { private: @@ -82,7 +79,4 @@ void DeferSizeCheck(std::shared_ptr& graph) { SizeCheckMover(graph->block(), graph).run(); } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/defer_size_check.h b/torch/csrc/jit/codegen/onednn/defer_size_check.h index 6e31cf202d3..e6d654199b2 100644 --- a/torch/csrc/jit/codegen/onednn/defer_size_check.h +++ b/torch/csrc/jit/codegen/onednn/defer_size_check.h @@ -2,14 +2,8 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { void DeferSizeCheck(std::shared_ptr& graph); -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/graph_fuser.cpp b/torch/csrc/jit/codegen/onednn/graph_fuser.cpp index 2a956362688..1c68edca761 100644 --- a/torch/csrc/jit/codegen/onednn/graph_fuser.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_fuser.cpp @@ -5,10 +5,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { void CreateLlgaSubgraphs(std::shared_ptr& graph) { AliasDb db(graph); @@ -25,7 +22,4 @@ void CreateLlgaSubgraphs(std::shared_ptr& graph) { EliminateDeadCode(graph); } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace 
torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/graph_fuser.h b/torch/csrc/jit/codegen/onednn/graph_fuser.h index ab37ad0211b..d0a802e2734 100644 --- a/torch/csrc/jit/codegen/onednn/graph_fuser.h +++ b/torch/csrc/jit/codegen/onednn/graph_fuser.h @@ -3,10 +3,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { struct WorkBlock : public std::pair { using pair::pair; @@ -47,7 +44,4 @@ class GraphRewriter { // torch/csrc/jit/passes/create_autodiff_subgraphs.cpp void CreateLlgaSubgraphs(std::shared_ptr& graph); -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/graph_helper.cpp b/torch/csrc/jit/codegen/onednn/graph_helper.cpp index 30f32f5994c..cc72489cec5 100644 --- a/torch/csrc/jit/codegen/onednn/graph_helper.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_helper.cpp @@ -5,10 +5,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { using opkind = dnnl::graph::op::kind; @@ -615,7 +612,4 @@ bool LlgaNodeWrapper::useOpaqueLayout(size_t offset) const { return n->is(attr::output_layouts)[offset] == OPAQUE_LAYOUT; } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/graph_helper.h b/torch/csrc/jit/codegen/onednn/graph_helper.h index fbb5eaa84ae..bb817092877 100644 --- a/torch/csrc/jit/codegen/onednn/graph_helper.h +++ b/torch/csrc/jit/codegen/onednn/graph_helper.h @@ -5,10 +5,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { #define STRIDED_LAYOUT 0 #define OPAQUE_LAYOUT 1 @@ -98,7 +95,4 @@ class LlgaNodeWrapper { Node* n; }; -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp b/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp index 71e74501656..c8d7617fe86 100644 --- a/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp @@ -5,10 +5,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { void GraphRewriter::cleanupSubgraphs() { auto curNode = *block_->nodes().rbegin(); @@ -138,7 +135,4 @@ std::optional GraphRewriter::tryMerge(Node* consumer, Node* producer) { return consumer; } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/guard_shape.cpp b/torch/csrc/jit/codegen/onednn/guard_shape.cpp index ee595b5c8d7..a71f980d631 100644 --- a/torch/csrc/jit/codegen/onednn/guard_shape.cpp +++ b/torch/csrc/jit/codegen/onednn/guard_shape.cpp @@ -5,10 +5,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { //! [ Note -- prepareFusionGroupAndGuardOutputs implementation ] //! 
shamelessly copying code from NNC (tensorexpr_fuser) with very little @@ -39,7 +36,4 @@ void prepareFusionGroupAndGuardOutputs(Block* block) { } } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/guard_shape.h b/torch/csrc/jit/codegen/onednn/guard_shape.h index 46f8a396a16..227aa35d10a 100644 --- a/torch/csrc/jit/codegen/onednn/guard_shape.h +++ b/torch/csrc/jit/codegen/onednn/guard_shape.h @@ -2,14 +2,8 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { void prepareFusionGroupAndGuardOutputs(Block* block); -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/interface.cpp b/torch/csrc/jit/codegen/onednn/interface.cpp index 64c101e15fe..c3edd9f4161 100644 --- a/torch/csrc/jit/codegen/onednn/interface.cpp +++ b/torch/csrc/jit/codegen/onednn/interface.cpp @@ -16,10 +16,8 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit { +namespace fuser::onednn { void fuseGraph(std::shared_ptr& g) { // Follow the process of the tensorexpr_fuser in profiling mode: @@ -95,8 +93,7 @@ void fuseGraph(std::shared_ptr& g) { } } -} // namespace onednn -} // namespace fuser +} // namespace fuser::onednn static Operation createLlgaKernel(const Node* node) { auto kernel = std::make_shared(node); @@ -178,5 +175,4 @@ RegisterOperators oneDNNGuardOp({ createLlgaGuardKernel, AliasAnalysisKind::FROM_SCHEMA), }); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/codegen/onednn/interface.h b/torch/csrc/jit/codegen/onednn/interface.h index 26b8a307a3d..4fd94081630 100644 --- a/torch/csrc/jit/codegen/onednn/interface.h +++ b/torch/csrc/jit/codegen/onednn/interface.h @@ -3,10 +3,8 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit { +namespace fuser::onednn { static std::atomic onednn_enabled{false}; @@ -16,8 +14,7 @@ static std::atomic& getLlgaEnabled() { C10_EXPORT void fuseGraph(std::shared_ptr& g); -} // namespace onednn -} // namespace fuser +} // namespace fuser::onednn struct C10_EXPORT RegisterLlgaFuseGraph : public PassManager { @@ -58,5 +55,4 @@ struct C10_EXPORT RegisterLlgaFuseGraph } }; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/codegen/onednn/kernel.cpp b/torch/csrc/jit/codegen/onednn/kernel.cpp index bc127e7e59d..fa04614e0ab 100644 --- a/torch/csrc/jit/codegen/onednn/kernel.cpp +++ b/torch/csrc/jit/codegen/onednn/kernel.cpp @@ -4,10 +4,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { using namespace dnnl::graph; using data_type = dnnl::graph::logical_tensor::data_type; @@ -293,7 +290,4 @@ void LlgaKernel::run(Stack& stack) { #endif } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/kernel.h b/torch/csrc/jit/codegen/onednn/kernel.h index 6e32c8e3bc9..cf24190d9aa 100644 --- a/torch/csrc/jit/codegen/onednn/kernel.h +++ b/torch/csrc/jit/codegen/onednn/kernel.h @@ -10,10 +10,7 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { 
+namespace torch::jit::fuser::onednn { using ArgSpec = LlgaTensorDesc; using ArgSpecs = std::vector; @@ -89,7 +86,4 @@ class LlgaKernel { bool is_initialized_ = false; }; -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/layout_propagation.cpp b/torch/csrc/jit/codegen/onednn/layout_propagation.cpp index d2fdc611099..7377f3156b1 100644 --- a/torch/csrc/jit/codegen/onednn/layout_propagation.cpp +++ b/torch/csrc/jit/codegen/onednn/layout_propagation.cpp @@ -2,10 +2,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { static void LayoutPropagation(Node* n) { if (!LlgaGraphHelper::isLlgaSubgraph(n)) @@ -47,7 +44,4 @@ void PropagateLayout(const std::shared_ptr& graph) { LayoutPropagation(graph->block()); } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/layout_propagation.h b/torch/csrc/jit/codegen/onednn/layout_propagation.h index 5e48a097cd4..6af79ca7879 100644 --- a/torch/csrc/jit/codegen/onednn/layout_propagation.h +++ b/torch/csrc/jit/codegen/onednn/layout_propagation.h @@ -2,14 +2,8 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { void PropagateLayout(const std::shared_ptr& graph); -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/operator.h b/torch/csrc/jit/codegen/onednn/operator.h index 9cbe6c32c8d..1a40c4438b4 100644 --- a/torch/csrc/jit/codegen/onednn/operator.h +++ b/torch/csrc/jit/codegen/onednn/operator.h @@ -4,10 +4,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { class Operator { public: @@ -146,7 +143,4 @@ class Operator { dnnl::graph::op::kind k; }; -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/prepare_binary.cpp b/torch/csrc/jit/codegen/onednn/prepare_binary.cpp index d09b5777f97..19866a349f5 100644 --- a/torch/csrc/jit/codegen/onednn/prepare_binary.cpp +++ b/torch/csrc/jit/codegen/onednn/prepare_binary.cpp @@ -3,10 +3,7 @@ #include #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { static bool compareConstValue(Value* v, double d) { auto ival = toIValue(v); @@ -179,7 +176,4 @@ void PrepareBinaryForLLGA(const std::shared_ptr& graph) { ConvertScalarToTensor(graph->block()); } -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/prepare_binary.h b/torch/csrc/jit/codegen/onednn/prepare_binary.h index d7f90002e8f..beb66d8822b 100644 --- a/torch/csrc/jit/codegen/onednn/prepare_binary.h +++ b/torch/csrc/jit/codegen/onednn/prepare_binary.h @@ -2,10 +2,7 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { // Prepare binary ops for LLGA // @@ -20,7 +17,4 @@ namespace onednn { // void PrepareBinaryForLLGA(const std::shared_ptr& graph); -} // namespace onednn -} // namespace fuser -} // namespace 
jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/codegen/onednn/register_interface.cpp b/torch/csrc/jit/codegen/onednn/register_interface.cpp index a24f8fd14ed..032b28909fd 100644 --- a/torch/csrc/jit/codegen/onednn/register_interface.cpp +++ b/torch/csrc/jit/codegen/onednn/register_interface.cpp @@ -1,9 +1,6 @@ #include -namespace torch { -namespace jit { -namespace fuser { -namespace onednn { +namespace torch::jit::fuser::onednn { static bool canFuseNode(const Node* node) { switch (node->kind()) { @@ -48,7 +45,4 @@ class RegisterInterface { static RegisterInterface register_interface_; } // namespace -} // namespace onednn -} // namespace fuser -} // namespace jit -} // namespace torch +} // namespace torch::jit::fuser::onednn diff --git a/torch/csrc/jit/mobile/nnc/aot_compiler.cpp b/torch/csrc/jit/mobile/nnc/aot_compiler.cpp index 7efad835b97..3444da98da0 100644 --- a/torch/csrc/jit/mobile/nnc/aot_compiler.cpp +++ b/torch/csrc/jit/mobile/nnc/aot_compiler.cpp @@ -25,10 +25,7 @@ using namespace torch::jit; using namespace torch::jit::tensorexpr; -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { // TODO(mvz): temporarily disable NNC backend in mobile builds. /* @@ -446,7 +443,4 @@ static c10::IValue preprocess( // static auto reg = torch::jit::backend_preprocess_register("nnc", preprocess); -} // namespace nnc -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/mobile/nnc/aot_compiler.h b/torch/csrc/jit/mobile/nnc/aot_compiler.h index aee92906fcc..307fd8833ee 100644 --- a/torch/csrc/jit/mobile/nnc/aot_compiler.h +++ b/torch/csrc/jit/mobile/nnc/aot_compiler.h @@ -4,10 +4,7 @@ #include #include -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { // Performs Ahead Of Time compilation of a given method in a model // returning the compiled function and LLVM assembly code @@ -18,7 +15,4 @@ TORCH_API std::pair, const std::string> aotCompile( const std::vector& types, const std::string& kernel_func_name = "func"); -} // namespace nnc -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/mobile/nnc/backend.cpp b/torch/csrc/jit/mobile/nnc/backend.cpp index 89a96428a09..1cfe1bf50f1 100644 --- a/torch/csrc/jit/mobile/nnc/backend.cpp +++ b/torch/csrc/jit/mobile/nnc/backend.cpp @@ -3,10 +3,7 @@ #include #include -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { class NNCBackend : public PyTorchBackendInterface { public: @@ -55,7 +52,4 @@ namespace { // static const auto cls = torch::jit::backend("nnc"); } // namespace -} // namespace nnc -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/mobile/nnc/context.cpp b/torch/csrc/jit/mobile/nnc/context.cpp index cddbdd82c5e..6ad10583b80 100644 --- a/torch/csrc/jit/mobile/nnc/context.cpp +++ b/torch/csrc/jit/mobile/nnc/context.cpp @@ -7,10 +7,7 @@ #include -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { constexpr int64_t kProducedNNCFileFormatVersion = 0x1L; @@ -342,7 +339,4 @@ Function* CompilationUnit::find_function(const c10::QualifiedName& name) const { return it->second.get(); } -} // namespace nnc -} // namespace mobile -} // namespace 
jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/mobile/nnc/context.h b/torch/csrc/jit/mobile/nnc/context.h index b9633ea5bfa..c5c8b8e8897 100644 --- a/torch/csrc/jit/mobile/nnc/context.h +++ b/torch/csrc/jit/mobile/nnc/context.h @@ -8,10 +8,7 @@ #include #include -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { // Specify the requirements on an input tensor. // TODO: support input tensor with dynamic shape (PR #54982) @@ -223,7 +220,4 @@ class TORCH_API CompilationUnit { std::unordered_map> functions_; }; -} // namespace nnc -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/mobile/nnc/registry.cpp b/torch/csrc/jit/mobile/nnc/registry.cpp index 088ac6ecd5b..18a15eccd23 100644 --- a/torch/csrc/jit/mobile/nnc/registry.cpp +++ b/torch/csrc/jit/mobile/nnc/registry.cpp @@ -1,13 +1,7 @@ #include -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { C10_DEFINE_REGISTRY(NNCKernelRegistry, NNCKernel); -} // namespace nnc -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/mobile/nnc/registry.h b/torch/csrc/jit/mobile/nnc/registry.h index c68a4f7a19c..22d0470d994 100644 --- a/torch/csrc/jit/mobile/nnc/registry.h +++ b/torch/csrc/jit/mobile/nnc/registry.h @@ -3,10 +3,7 @@ #include #include -namespace torch { -namespace jit { -namespace mobile { -namespace nnc { +namespace torch::jit::mobile::nnc { using nnc_kernel_function_type = int(void**); @@ -40,7 +37,4 @@ inline std::unique_ptr get_nnc_kernel(const std::string& id) { } // namespace registry -} // namespace nnc -} // namespace mobile -} // namespace jit -} // namespace torch +} // namespace torch::jit::mobile::nnc diff --git a/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.cpp b/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.cpp index 8ecab1bef91..1d35b30c050 100644 --- a/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.cpp +++ b/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.cpp @@ -5,8 +5,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { @@ -70,5 +69,4 @@ Module DBRQuantRemoveRedundantAliases(Module& module) { return module; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.h b/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.h index 548d952014c..1e4beba0669 100644 --- a/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.h +++ b/torch/csrc/jit/passes/dbr_quantization/remove_redundant_aliases.h @@ -2,8 +2,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { // This function replaces instances of // @@ -17,5 +16,4 @@ namespace jit { // on the module forward, if it's safe to do so. 
TORCH_API Module DBRQuantRemoveRedundantAliases(Module& module); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.cpp index 8786af2ee7e..1f9b49c3c0a 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.cpp @@ -4,8 +4,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { void convertSubgraphToSubBlock(Block* block) { for (auto it = block->nodes().begin(), end = block->nodes().end(); @@ -54,5 +53,4 @@ void ONNXAutogradFunctionProcess(std::shared_ptr& graph) { convertSubgraphToSubBlock(graph->block()); } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.h b/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.h index 4c3c07bb671..4b1c854fa2b 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.h +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/autograd_function_process.h @@ -2,10 +2,8 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { TORCH_API void ONNXAutogradFunctionProcess(std::shared_ptr& graph); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp index 3e516498272..4210cde0f52 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp @@ -1,7 +1,6 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { bool IndexingPatternFinder::IsSameSource(const Node* n, const Node* m) { const auto source_n = n->sourceRange().source(); @@ -41,5 +40,4 @@ std::vector IndexingPatternFinder::FetchSliceAndSelect( return slice_and_select_node; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/common.h b/torch/csrc/jit/passes/onnx/pattern_conversion/common.h index eb4f12a94e4..34ab95aceff 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/common.h +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/common.h @@ -4,8 +4,7 @@ // Functions used by both encapsulation and conversion. -namespace torch { -namespace jit { +namespace torch::jit { struct IndexingPatternFinder { public: @@ -15,5 +14,4 @@ struct IndexingPatternFinder { static bool IsSameSource(const Node* n, const Node* m); }; -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp index cd975d0375f..d11336a13e1 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp @@ -12,8 +12,7 @@ // EDITING THIS FILE? READ THIS FIRST! 
// see Note [Edit Pattern Conversion] in pattern_conversion.h -namespace torch { -namespace jit { +namespace torch::jit { // Converting inplace index_put to ONNX namespace { @@ -392,5 +391,4 @@ std::vector ConvertPatternFromSubblock( return res; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.h b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.h index 4fa3b0c47f9..16fdedee947 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.h +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.h @@ -3,8 +3,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { // Introduction // @@ -42,5 +41,4 @@ TORCH_API std::vector ConvertPatternFromSubblock( py::dict& env, py::set& values_in_env); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp index 7a98567a529..a51801ac836 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp @@ -7,8 +7,7 @@ // EDITING THIS FILE? READ THIS FIRST! // see Note [Edit Pattern Encapsulation] in pattern_encapsulation.h -namespace torch { -namespace jit { +namespace torch::jit { namespace { @@ -87,5 +86,4 @@ std::optional EncapsulatePatternIntoSubblock(Node* n) { return std::nullopt; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h index 6673d4aba3a..1f69cb8def1 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h @@ -2,8 +2,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { // Introduction // @@ -30,5 +29,4 @@ namespace jit { // pattern is stored as attr::name. 
TORCH_API std::optional EncapsulatePatternIntoSubblock(Node* n); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/dedup_module_uses.cpp b/torch/csrc/jit/passes/quantization/dedup_module_uses.cpp index 2c83bcbc10e..35b19597be4 100644 --- a/torch/csrc/jit/passes/quantization/dedup_module_uses.cpp +++ b/torch/csrc/jit/passes/quantization/dedup_module_uses.cpp @@ -5,8 +5,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { class ModuleUseDeduper { public: @@ -125,5 +124,4 @@ void DedupModuleUses(Module& module) { d.dedup(); } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/dedup_module_uses.h b/torch/csrc/jit/passes/quantization/dedup_module_uses.h index 0204d5f73f0..4094704129a 100644 --- a/torch/csrc/jit/passes/quantization/dedup_module_uses.h +++ b/torch/csrc/jit/passes/quantization/dedup_module_uses.h @@ -2,8 +2,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { /** Recursively deduplicate multiple uses of the same module by * creating an instance clone for each use of the module, which means @@ -24,5 +23,4 @@ namespace jit { */ TORCH_API void DedupModuleUses(Module& module); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/finalize.cpp b/torch/csrc/jit/passes/quantization/finalize.cpp index ebbd379f8da..f04d6106430 100644 --- a/torch/csrc/jit/passes/quantization/finalize.cpp +++ b/torch/csrc/jit/passes/quantization/finalize.cpp @@ -16,8 +16,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { @@ -275,5 +274,4 @@ Module FinalizeOnDevicePTQ( return module; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/finalize.h b/torch/csrc/jit/passes/quantization/finalize.h index d73addbc387..8325a32110b 100644 --- a/torch/csrc/jit/passes/quantization/finalize.h +++ b/torch/csrc/jit/passes/quantization/finalize.h @@ -4,8 +4,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { /** \brief Backend specific pass to fuse dequantize - op - quantize calls * as quantized_op calls. 
@@ -59,5 +58,4 @@ TORCH_API Module FinalizeOnDevicePTQ( Module& module, QuantType quant_type, const std::string& method_name); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/fusion_passes.cpp b/torch/csrc/jit/passes/quantization/fusion_passes.cpp index 2dbfdfe061b..46070c4939f 100644 --- a/torch/csrc/jit/passes/quantization/fusion_passes.cpp +++ b/torch/csrc/jit/passes/quantization/fusion_passes.cpp @@ -1,8 +1,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { void fuseQuantizeAddReluImpl(std::shared_ptr& graph) { @@ -59,5 +58,4 @@ void FuseQuantizedAddRelu(std::shared_ptr& graph) { fuseQuantizeAddReluImpl(graph); } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/fusion_passes.h b/torch/csrc/jit/passes/quantization/fusion_passes.h index b316fe2adab..c741d9cdb7e 100644 --- a/torch/csrc/jit/passes/quantization/fusion_passes.h +++ b/torch/csrc/jit/passes/quantization/fusion_passes.h @@ -2,8 +2,6 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { TORCH_API void FuseQuantizedAddRelu(std::shared_ptr& graph); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/helper.cpp b/torch/csrc/jit/passes/quantization/helper.cpp index 7eea68eb106..4e103b32701 100644 --- a/torch/csrc/jit/passes/quantization/helper.cpp +++ b/torch/csrc/jit/passes/quantization/helper.cpp @@ -5,8 +5,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { using graph_rewrite_helper::getFuncName; @@ -795,5 +794,4 @@ bool is_batchnorm3d_module( "__torch__.torch.nn.modules.batchnorm.BatchNorm3d"); } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/helper.h b/torch/csrc/jit/passes/quantization/helper.h index 21efbff7aa6..d6a0a326f25 100644 --- a/torch/csrc/jit/passes/quantization/helper.h +++ b/torch/csrc/jit/passes/quantization/helper.h @@ -8,8 +8,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { using graph_rewrite_helper::getFuncName; @@ -212,5 +211,4 @@ bool is_batchnorm3d_module( const Match& match, const std::unordered_map& vmap); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/insert_observers.cpp b/torch/csrc/jit/passes/quantization/insert_observers.cpp index 9aacd481a55..4a0d600ca1b 100644 --- a/torch/csrc/jit/passes/quantization/insert_observers.cpp +++ b/torch/csrc/jit/passes/quantization/insert_observers.cpp @@ -17,8 +17,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { using ModuleQConfigMap = std::unordered_map>; @@ -1720,5 +1719,4 @@ Module InsertObserversForOnDevicePTQ( cloned_module, observer_method_name, /* is_entry_point */ true); return cloned_module; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/insert_observers.h b/torch/csrc/jit/passes/quantization/insert_observers.h index e8857318261..7dbac9cfca6 100644 --- a/torch/csrc/jit/passes/quantization/insert_observers.h +++ b/torch/csrc/jit/passes/quantization/insert_observers.h @@ -14,8 +14,7 @@ struct hash { } // namespace std -namespace torch { -namespace jit { +namespace torch::jit { using QConfig = std::tuple; using QConfigDict = std::unordered_map>; @@ -64,5 +63,4 @@ TORCH_API Module 
InsertObserversForOnDevicePTQ( bool inplace, QuantType quant_type = QuantType::STATIC); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp index 05c19bdb38a..8739c4fcaf4 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp @@ -15,8 +15,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { using graph_rewrite_helper::PatternInfo; @@ -1841,5 +1840,4 @@ Module InsertQuantDeQuantOnDevicePTQ( h.propagateQuantizationOps(module); return module; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.h b/torch/csrc/jit/passes/quantization/insert_quant_dequant.h index de2b31fdba7..9bda42edae4 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.h +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.h @@ -4,8 +4,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { /** Replicate quantize node for prim::If blocks, so that we can match * quantization patterns in prim::If blocks @@ -42,5 +41,4 @@ TORCH_API Module InsertQuantDeQuantOnDevicePTQ( bool debug, QuantType quant_type = QuantType::STATIC); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/quantization_patterns.h b/torch/csrc/jit/passes/quantization/quantization_patterns.h index 80cf46d7e02..c5f8e796dca 100644 --- a/torch/csrc/jit/passes/quantization/quantization_patterns.h +++ b/torch/csrc/jit/passes/quantization/quantization_patterns.h @@ -10,8 +10,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { struct QuantFusionInfo { std::string quantized_op_name; @@ -1260,5 +1259,4 @@ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %di std::move(conv_transpose2d_with_quant_prepack)}}; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/quantization_type.cpp b/torch/csrc/jit/passes/quantization/quantization_type.cpp index 66e99c06a52..290cbd725e7 100644 --- a/torch/csrc/jit/passes/quantization/quantization_type.cpp +++ b/torch/csrc/jit/passes/quantization/quantization_type.cpp @@ -1,7 +1,6 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { std::ostream& operator<<(std::ostream& os, QuantType t) { switch (t) { @@ -17,5 +16,4 @@ std::ostream& operator<<(std::ostream& os, QuantType t) { return os; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/quantization_type.h b/torch/csrc/jit/passes/quantization/quantization_type.h index ac4afe90ed9..1b91854a5e5 100644 --- a/torch/csrc/jit/passes/quantization/quantization_type.h +++ b/torch/csrc/jit/passes/quantization/quantization_type.h @@ -2,8 +2,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { // Quantization type (dynamic quantization, static quantization). 
// Should match the Python enum in quantize_jit.py @@ -11,5 +10,4 @@ enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC }; std::ostream& operator<<(std::ostream& os, QuantType t); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/register_packed_params.cpp b/torch/csrc/jit/passes/quantization/register_packed_params.cpp index c3696cdc510..589aedea3d8 100644 --- a/torch/csrc/jit/passes/quantization/register_packed_params.cpp +++ b/torch/csrc/jit/passes/quantization/register_packed_params.cpp @@ -7,8 +7,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { bool isPrepackNode(Node* n) { @@ -144,5 +143,4 @@ std::unordered_set RegisterPrePackParams( return packed_param_names; } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/quantization/register_packed_params.h b/torch/csrc/jit/passes/quantization/register_packed_params.h index c1cbf1b27bb..dcee7144f66 100644 --- a/torch/csrc/jit/passes/quantization/register_packed_params.h +++ b/torch/csrc/jit/passes/quantization/register_packed_params.h @@ -4,8 +4,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { using PrePackParamFilterFn = std::function; @@ -16,5 +15,4 @@ TORCH_API std::unordered_set RegisterPrePackParams( const std::string& attr_prefix); TORCH_API std::string joinPaths(const std::vector& paths); -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp index 866feb97381..7ec05500ded 100644 --- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp +++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp @@ -6,8 +6,7 @@ #include -namespace torch { -namespace jit { +namespace torch::jit { namespace { IValue deepCopy(const IValue& self) { @@ -305,5 +304,4 @@ void checkAliasAnnotation( checkWrites(inputsToCheck, inputsDeepCopy); } -} // namespace jit -} // namespace torch +} // namespace torch::jit diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.h b/torch/csrc/jit/passes/utils/check_alias_annotation.h index df491c8ea3d..e227c3bb456 100644 --- a/torch/csrc/jit/passes/utils/check_alias_annotation.h +++ b/torch/csrc/jit/passes/utils/check_alias_annotation.h @@ -6,8 +6,7 @@ #include #include -namespace torch { -namespace jit { +namespace torch::jit { // Verify that alias annotations are correct. See impl for definition of // "correct". 
@@ -18,5 +17,4 @@ TORCH_API void checkAliasAnnotation(
     const std::shared_ptr& graph,
     std::vector pythonInputs,
     const std::string& unqualifiedOpName);
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/memory_dag.cpp b/torch/csrc/jit/passes/utils/memory_dag.cpp
index 3ecbbb8273a..8ad213082f5 100644
--- a/torch/csrc/jit/passes/utils/memory_dag.cpp
+++ b/torch/csrc/jit/passes/utils/memory_dag.cpp
@@ -4,8 +4,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {
 namespace {

 void makePointerToImpl(Element* from, Element* to) {
@@ -232,5 +231,4 @@ void MemoryDAG::setWildcards(
 Element* MemoryDAG::unsafeMakeFreshValue(const Value* v) {
   return makeFreshValueImpl(v, indexToElementMap_);
 }
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/memory_dag.h b/torch/csrc/jit/passes/utils/memory_dag.h
index 1d2292fe90c..dc6d5b24a09 100644
--- a/torch/csrc/jit/passes/utils/memory_dag.h
+++ b/torch/csrc/jit/passes/utils/memory_dag.h
@@ -16,8 +16,7 @@
 // Uses a compressed index representation for faster comparisons
 typedef c10::SparseBitVector<256> MemoryLocations;

-namespace torch {
-namespace jit {
+namespace torch::jit {

 struct Value;
@@ -172,5 +171,4 @@ class TORCH_API MemoryDAGBuilder {
   // the map to construct the `MemoryDAG`
   std::vector> indexToElementMap_;
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/op_registry.cpp b/torch/csrc/jit/passes/utils/op_registry.cpp
index 5d4d9ce4a33..2538c90b457 100644
--- a/torch/csrc/jit/passes/utils/op_registry.cpp
+++ b/torch/csrc/jit/passes/utils/op_registry.cpp
@@ -2,8 +2,7 @@

 // Location for Commonly Used Shape registries

-namespace torch {
-namespace jit {
+namespace torch::jit {

 // Requirements:
 //   dims : preserved from the first argument
@@ -72,5 +71,4 @@ std::shared_ptr ops_one_tensor_in_shape_transform() {
   });
   return ops;
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/op_registry.h b/torch/csrc/jit/passes/utils/op_registry.h
index d68d1d6192d..85d9ac8c7d2 100644
--- a/torch/csrc/jit/passes/utils/op_registry.h
+++ b/torch/csrc/jit/passes/utils/op_registry.h
@@ -4,8 +4,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {

 // Moved from shape_analysis.cpp
 // Requirements:
@@ -27,5 +26,4 @@ std::shared_ptr nn_ops_first_input_preserving();
 // tensor inputs : 1
 // tensor outputs : 1
 std::shared_ptr ops_one_tensor_in_shape_transform();
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/optimization_utils.cpp b/torch/csrc/jit/passes/utils/optimization_utils.cpp
index 2e2eb8299fd..e5c25f8a0a2 100644
--- a/torch/csrc/jit/passes/utils/optimization_utils.cpp
+++ b/torch/csrc/jit/passes/utils/optimization_utils.cpp
@@ -1,7 +1,6 @@
 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {

 bool nonConstantParameters(Node* n) {
   // Checks if the parameters, not including the
@@ -14,5 +13,4 @@ bool nonConstantParameters(Node* n) {
   return false;
 }

-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/optimization_utils.h b/torch/csrc/jit/passes/utils/optimization_utils.h
index 6018fbea6da..720523ede4c 100644
--- a/torch/csrc/jit/passes/utils/optimization_utils.h
+++ b/torch/csrc/jit/passes/utils/optimization_utils.h
@@ -3,12 +3,10 @@

 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {

 // Checks if the parameters, not including the
 // first param are all constants.
 bool nonConstantParameters(Node* n);

-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.cpp b/torch/csrc/jit/passes/utils/subgraph_utils.cpp
index 0cc07a18c05..8fd18e4717e 100644
--- a/torch/csrc/jit/passes/utils/subgraph_utils.cpp
+++ b/torch/csrc/jit/passes/utils/subgraph_utils.cpp
@@ -9,9 +9,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace SubgraphUtils {
+namespace torch::jit::SubgraphUtils {
 namespace {

 bool hasSubgraph(Node* n) {
@@ -633,6 +631,4 @@ std::string generateNameForGraph(
   return truncateStrWithHash(graph_name.str(), maxlen);
 }

-} // namespace SubgraphUtils
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::SubgraphUtils
diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.h b/torch/csrc/jit/passes/utils/subgraph_utils.h
index dd761409ca2..fc5ba3e415e 100644
--- a/torch/csrc/jit/passes/utils/subgraph_utils.h
+++ b/torch/csrc/jit/passes/utils/subgraph_utils.h
@@ -4,14 +4,11 @@
 #include
 #include

-namespace torch {
-namespace jit {
-
 // Utilities for dealing with nodes that contain subgraphs.
 //
 // They handle the complexity of editing inputs/outputs as you merge nodes in
 // and out of subgraphs.
-namespace SubgraphUtils {
+namespace torch::jit::SubgraphUtils {

 // Create a new subgraph node that contains only `n`. The new subgraph will have
 // `subgraphKind` as its type.
@@ -70,6 +67,4 @@ TORCH_API std::string generateNameForGraph(
     size_t maxlen = 40,
     const std::string& prefix = "fused");

-} // namespace SubgraphUtils
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::SubgraphUtils
diff --git a/torch/csrc/jit/tensorexpr/bounds_inference.h b/torch/csrc/jit/tensorexpr/bounds_inference.h
index 300cb89a788..67fff99dec7 100644
--- a/torch/csrc/jit/tensorexpr/bounds_inference.h
+++ b/torch/csrc/jit/tensorexpr/bounds_inference.h
@@ -6,9 +6,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 class Expr;
 class Buf;
@@ -74,6 +72,4 @@ TORCH_API bool isOverlapping(
     const StorePtr& S,
     const LoadPtr& L);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/bounds_overlap.h b/torch/csrc/jit/tensorexpr/bounds_overlap.h
index 5cc502cdecd..0dbb6972787 100644
--- a/torch/csrc/jit/tensorexpr/bounds_overlap.h
+++ b/torch/csrc/jit/tensorexpr/bounds_overlap.h
@@ -6,10 +6,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
-namespace analysis {
+namespace torch::jit::tensorexpr::analysis {

 // A simple class containing the start and end of a range in a single dimension.
 struct TORCH_API Bound {
@@ -121,7 +118,4 @@ std::vector TORCH_API subtractIndicesBounds(
 std::vector TORCH_API subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B);

-} // namespace analysis
-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr::analysis
diff --git a/torch/csrc/jit/tensorexpr/cpp_intrinsics.h b/torch/csrc/jit/tensorexpr/cpp_intrinsics.h
index 3149335ea30..0e4bb6a6152 100644
--- a/torch/csrc/jit/tensorexpr/cpp_intrinsics.h
+++ b/torch/csrc/jit/tensorexpr/cpp_intrinsics.h
@@ -1,8 +1,6 @@
 #pragma once

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 constexpr auto cpp_intrinsics_definition = R"(
 namespace std {
@@ -31,6 +29,4 @@ To bitcast(const From& v) {
 } // namespace std
 )";

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/cuda_random.h b/torch/csrc/jit/tensorexpr/cuda_random.h
index 987ac5211d9..ce59bba11e8 100644
--- a/torch/csrc/jit/tensorexpr/cuda_random.h
+++ b/torch/csrc/jit/tensorexpr/cuda_random.h
@@ -1,8 +1,6 @@
 #pragma once

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 constexpr auto philox_random_string = R"(

@@ -99,6 +97,4 @@ __device__ __inline__ float Uint32ToFloat(unsigned int x) {

 )";

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/fwd_decls.h b/torch/csrc/jit/tensorexpr/fwd_decls.h
index 84c34a278a0..d0a4acbc316 100644
--- a/torch/csrc/jit/tensorexpr/fwd_decls.h
+++ b/torch/csrc/jit/tensorexpr/fwd_decls.h
@@ -2,9 +2,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 template
 using NodePtr = std::shared_ptr;
@@ -124,6 +122,4 @@ using SyncThreadsPtr = NodePtr;
 AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
 #undef IMM_DECLARE

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/ir.h b/torch/csrc/jit/tensorexpr/ir.h
index 6afd053c8c4..8360fb950fa 100644
--- a/torch/csrc/jit/tensorexpr/ir.h
+++ b/torch/csrc/jit/tensorexpr/ir.h
@@ -11,9 +11,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 enum CompareSelectOperation {
   kEQ = 0,
@@ -918,6 +916,4 @@ TORCH_API ExprPtr flatten_index(
     const std::vector& indices,
     const std::vector& strides);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/ir_cloner.h b/torch/csrc/jit/tensorexpr/ir_cloner.h
index 3336fb0dc59..dd626eeb4c9 100644
--- a/torch/csrc/jit/tensorexpr/ir_cloner.h
+++ b/torch/csrc/jit/tensorexpr/ir_cloner.h
@@ -5,9 +5,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 class TORCH_API IRCloner : public IRMutator {
  public:
@@ -61,6 +59,4 @@ class TORCH_API IRCloner : public IRMutator {
   StmtPtr mutate(const CondPtr& v) override;
 };

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/ir_verifier.h b/torch/csrc/jit/tensorexpr/ir_verifier.h
index 020c01a2334..e8e887ac80a 100644
--- a/torch/csrc/jit/tensorexpr/ir_verifier.h
+++ b/torch/csrc/jit/tensorexpr/ir_verifier.h
@@ -3,9 +3,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 class Expr;
 class ExprHandle;
@@ -53,6 +51,4 @@ TORCH_API void verify(const StmtPtr&);
 TORCH_API void verify(const ExprPtr&);
 TORCH_API void verify(const ExprHandle&);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.h b/torch/csrc/jit/tensorexpr/operators/conv2d.h
index f842a1350a5..9aa328d98b6 100644
--- a/torch/csrc/jit/tensorexpr/operators/conv2d.h
+++ b/torch/csrc/jit/tensorexpr/operators/conv2d.h
@@ -3,9 +3,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 // An API to compute 2D depthwise convolutions with bias.
 TORCH_API Tensor conv2d_depthwise(
@@ -100,6 +98,4 @@ Tensor computeMkldnnPrepackedConvRun(
     const std::vector& outputStrides,
     const std::optional& outputType,
     at::Device device);
-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/matmul.h b/torch/csrc/jit/tensorexpr/operators/matmul.h
index 40ef3cfd9b6..d572a1c396c 100644
--- a/torch/csrc/jit/tensorexpr/operators/matmul.h
+++ b/torch/csrc/jit/tensorexpr/operators/matmul.h
@@ -2,9 +2,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 Tensor computeMatmul(
     const std::vector& inputs,
@@ -19,6 +17,4 @@ Tensor computeAddMM(
     const std::optional& outputType,
     at::Device device);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/norm.h b/torch/csrc/jit/tensorexpr/operators/norm.h
index dbe6140cca8..e531943237b 100644
--- a/torch/csrc/jit/tensorexpr/operators/norm.h
+++ b/torch/csrc/jit/tensorexpr/operators/norm.h
@@ -2,9 +2,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 Tensor computeBatchNorm(
     const std::vector& inputs,
@@ -13,6 +11,4 @@ Tensor computeBatchNorm(
     const std::optional& outputType,
     at::Device device);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/pointwise.h b/torch/csrc/jit/tensorexpr/operators/pointwise.h
index 1e3366a2858..8f8f6240d19 100644
--- a/torch/csrc/jit/tensorexpr/operators/pointwise.h
+++ b/torch/csrc/jit/tensorexpr/operators/pointwise.h
@@ -2,9 +2,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 TORCH_API Tensor computeSign(
     const std::vector& inputs,
@@ -81,6 +79,4 @@ Tensor computeScalar(
     const std::function& innerExpr);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/quantization.h b/torch/csrc/jit/tensorexpr/operators/quantization.h
index d48c9e3273b..51bdbe730a6 100644
--- a/torch/csrc/jit/tensorexpr/operators/quantization.h
+++ b/torch/csrc/jit/tensorexpr/operators/quantization.h
@@ -2,9 +2,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 TORCH_API ExprHandle quantizePerTensorQParamFromArg(ArgValue arg);

@@ -155,6 +153,4 @@ TORCH_API Tensor computeQuantizedSigmoidExternalCall(
     const std::vector& outputStrides,
     const std::optional& outputType,
     at::Device);
-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/reduction.h b/torch/csrc/jit/tensorexpr/operators/reduction.h
index 7d25e14a171..615d75c397c 100644
--- a/torch/csrc/jit/tensorexpr/operators/reduction.h
+++ b/torch/csrc/jit/tensorexpr/operators/reduction.h
@@ -2,9 +2,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 TORCH_API Tensor computeSum(
     const std::vector& inputs,
@@ -31,6 +29,4 @@ Tensor computeMax(
     const std::optional& outputType,
     at::Device device);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/tensorexpr/operators/softmax.h b/torch/csrc/jit/tensorexpr/operators/softmax.h
index d5dd7fd429b..f2a5698673c 100644
--- a/torch/csrc/jit/tensorexpr/operators/softmax.h
+++ b/torch/csrc/jit/tensorexpr/operators/softmax.h
@@ -2,9 +2,7 @@

 #include

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

 Tensor computeSoftmax(
     const std::vector& inputs,
@@ -12,6 +10,4 @@ Tensor computeSoftmax(
     const std::vector& outputStrides,
     bool log_softmax);

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
diff --git a/torch/csrc/jit/testing/file_check.cpp b/torch/csrc/jit/testing/file_check.cpp
index 97273ef4a11..d6af1d2a1e3 100644
--- a/torch/csrc/jit/testing/file_check.cpp
+++ b/torch/csrc/jit/testing/file_check.cpp
@@ -23,10 +23,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
-
-namespace testing {
+namespace torch::jit::testing {

 enum CheckType {
   CHECK,
@@ -633,6 +630,4 @@ FileCheck* FileCheck::check_regex(const std::string& str) {
   return this;
 }

-} // namespace testing
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::testing
diff --git a/torch/csrc/jit/testing/file_check.h b/torch/csrc/jit/testing/file_check.h
index 6e9290f5130..fd09fcc6ad3 100644
--- a/torch/csrc/jit/testing/file_check.h
+++ b/torch/csrc/jit/testing/file_check.h
@@ -4,8 +4,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {

 struct Graph;
@@ -77,5 +76,4 @@ struct FileCheck {
   std::unique_ptr fcImpl;
 };
 } // namespace testing
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/testing/hooks_for_testing.cpp b/torch/csrc/jit/testing/hooks_for_testing.cpp
index 553938afd77..d23da57c74c 100644
--- a/torch/csrc/jit/testing/hooks_for_testing.cpp
+++ b/torch/csrc/jit/testing/hooks_for_testing.cpp
@@ -2,8 +2,7 @@

 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {

 static ModuleHook emit_module_callback;
 void didFinishEmitModule(Module module) {
@@ -28,5 +27,4 @@ std::pair getEmitHooks() {
   return std::make_pair(emit_module_callback, emit_function_callback);
 }

-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
diff --git a/torch/csrc/jit/testing/hooks_for_testing.h b/torch/csrc/jit/testing/hooks_for_testing.h
index 108dea3f1f7..5613a0d2447 100644
--- a/torch/csrc/jit/testing/hooks_for_testing.h
+++ b/torch/csrc/jit/testing/hooks_for_testing.h
@@ -4,8 +4,7 @@
 #include
 #include

-namespace torch {
-namespace jit {
+namespace torch::jit {

 struct Module;
 using ModuleHook = std::function;
@@ -17,5 +16,4 @@ TORCH_API void setEmitHooks(ModuleHook for_module, FunctionHook for_fn);

 TORCH_API std::pair getEmitHooks();

-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
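
Every hunk in this patch applies the same transformation: a nested pair (or deeper chain) of namespace blocks is collapsed into a single C++17 nested namespace definition, and the matching closing-brace comments are collapsed to match. The sketch below only illustrates that language feature; the function names are made up for the example and do not appear in the patch.

// Pre-C++17 spelling, as it appeared before this patch:
namespace torch {
namespace jit {
inline int illustrative_fn() { return 1; } // placeholder, not from the patch
} // namespace jit
} // namespace torch

// C++17 nested namespace definition, as introduced by this patch.
// Both blocks reopen the same namespace, so the unqualified call below
// resolves to torch::jit::illustrative_fn.
namespace torch::jit {
inline int illustrative_caller() { return illustrative_fn() + 1; }
} // namespace torch::jit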