diff --git a/.clang-format b/.clang-format
index 31be219b186..0b94540e7a2 100644
--- a/.clang-format
+++ b/.clang-format
@@ -106,10 +106,11 @@ StatementMacros:
   - C10_DEFINE_int32
   - C10_DEFINE_int64
   - C10_DEFINE_string
+  - DEFINE_BINARY
   - PyObject_HEAD
   - PyObject_VAR_HEAD
   - PyException_HEAD
-  - DEFINE_BINARY
+  - TORCH_DECLARE_bool
 TabWidth: 8
 UseTab: Never
diff --git a/aten/src/ATen/native/DispatchStub.h b/aten/src/ATen/native/DispatchStub.h
index 54835cefbaf..fc8a5f1962d 100644
--- a/aten/src/ATen/native/DispatchStub.h
+++ b/aten/src/ATen/native/DispatchStub.h
@@ -20,7 +20,7 @@
 //
 // In native/MyKernel.h:
 // using fn_type = void(*)(const Tensor& x);
-// DECLARE_DISPATCH(fn_type, stub);
+// DECLARE_DISPATCH(fn_type, stub)
 //
 // In native/MyKernel.cpp
 // DEFINE_DISPATCH(stub);
diff --git a/aten/src/ATen/native/cpu/ReducedPrecisionFloatGemvFastPathKernel.cpp b/aten/src/ATen/native/cpu/ReducedPrecisionFloatGemvFastPathKernel.cpp
index e266dcf202a..6eced4b7a4f 100644
--- a/aten/src/ATen/native/cpu/ReducedPrecisionFloatGemvFastPathKernel.cpp
+++ b/aten/src/ATen/native/cpu/ReducedPrecisionFloatGemvFastPathKernel.cpp
@@ -476,8 +476,8 @@ void bf16_gemv_trans(
 #if !defined(C10_MOBILE)
 REGISTER_DISPATCH(fp16_dot_with_fp32_arith_stub, &fp16_dot_with_fp32_arith)
 REGISTER_DISPATCH(fp16_gemv_trans_stub, &fp16_gemv_trans)
-REGISTER_DISPATCH(bf16_dot_with_fp32_arith_stub, &bf16_dot_with_fp32_arith);
-REGISTER_DISPATCH(bf16_gemv_trans_stub, &bf16_gemv_trans);
+REGISTER_DISPATCH(bf16_dot_with_fp32_arith_stub, &bf16_dot_with_fp32_arith)
+REGISTER_DISPATCH(bf16_gemv_trans_stub, &bf16_gemv_trans)
 #endif //!defined(C10_MOBILE)
 
 } // namespace at::native
diff --git a/cmake/public/utils.cmake b/cmake/public/utils.cmake
index c796fab1e9a..a1b2f0b5ec3 100644
--- a/cmake/public/utils.cmake
+++ b/cmake/public/utils.cmake
@@ -387,9 +387,8 @@ function(torch_compile_options libname)
       list(APPEND private_compile_options -Wunused-but-set-variable)
     endif()
     if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
-      list(APPEND private_compile_options -Wunused-private-field)
-    endif()
-    if(NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+      list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi)
+    else()
       list(APPEND private_compile_options
           # Considered to be flaky. See the discussion at
           # https://github.com/pytorch/pytorch/pull/9608
diff --git a/torch/csrc/DataLoader.cpp b/torch/csrc/DataLoader.cpp
index 50054f529a2..7303ef5f680 100644
--- a/torch/csrc/DataLoader.cpp
+++ b/torch/csrc/DataLoader.cpp
@@ -70,15 +70,15 @@ SIGNAL_HANDLER(
     SIGBUS,
     handler_SIGBUS,
     "ERROR: Unexpected bus error encountered in worker. "
-    "This might be caused by insufficient shared memory (shm).\n");
+    "This might be caused by insufficient shared memory (shm).\n")
 SIGNAL_HANDLER(
     SIGSEGV,
     handler_SIGSEGV,
-    "ERROR: Unexpected segmentation fault encountered in worker.\n");
+    "ERROR: Unexpected segmentation fault encountered in worker.\n")
 SIGNAL_HANDLER(
     SIGFPE,
     handler_SIGFPE,
-    "ERROR: Unexpected floating-point exception encountered in worker.\n");
+    "ERROR: Unexpected floating-point exception encountered in worker.\n")
 
 // When an error happened in DataLoader methods and Python starts to exit, the
 // error trace will keep the loader alive, and Python may kill the children
diff --git a/torch/csrc/Exceptions.h b/torch/csrc/Exceptions.h
index 8c1f37a10c2..7a927c3f03f 100644
--- a/torch/csrc/Exceptions.h
+++ b/torch/csrc/Exceptions.h
@@ -339,7 +339,7 @@ struct noop_gil_scoped_release {
   // user-defined constructor (i.e. not defaulted) to avoid
   // unused-variable warnings at usage sites of this class
   // NOLINTNEXTLINE(modernize-use-equals-default)
-  noop_gil_scoped_release(){};
+  noop_gil_scoped_release() {}
 };
 
 template
diff --git a/torch/csrc/PyInterpreter.cpp b/torch/csrc/PyInterpreter.cpp
index 57410c5cadf..6d285759e28 100644
--- a/torch/csrc/PyInterpreter.cpp
+++ b/torch/csrc/PyInterpreter.cpp
@@ -273,14 +273,14 @@ void ConcretePyInterpreterVTable::decref(PyObject* pyobj, bool has_pyobj_slot)
     }
   }
   Py_DECREF(pyobj);
-};
+}
 
 void ConcretePyInterpreterVTable::incref(PyObject* pyobj) const {
   if (!Py_IsInitialized())
     return;
   pybind11::gil_scoped_acquire gil;
   Py_INCREF(pyobj);
-};
+}
 
 bool isPythonTensor(const at::Tensor& tensor) {
   return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Python);
 }
diff --git a/torch/csrc/Storage.h b/torch/csrc/Storage.h
index 55deb18892b..fc63d14ab93 100644
--- a/torch/csrc/Storage.h
+++ b/torch/csrc/Storage.h
@@ -10,7 +10,7 @@
 #define THPStorageStr "torch.UntypedStorage"
 
 struct THPStorage {
-  PyObject_HEAD;
+  PyObject_HEAD
   c10::MaybeOwned cdata;
   bool is_hermetic;
 };
diff --git a/torch/csrc/autograd/FunctionsManual.cpp b/torch/csrc/autograd/FunctionsManual.cpp
index 2af649f468c..86771b9b30a 100644
--- a/torch/csrc/autograd/FunctionsManual.cpp
+++ b/torch/csrc/autograd/FunctionsManual.cpp
@@ -5249,7 +5249,7 @@ static Tensor apply_simple_transformation(
     return condition_with_I ? K - transformation : -transformation;
   }
 }
-};
+}
 
 std::tuple householder_product_backward(
     const Tensor& grad,
diff --git a/torch/csrc/autograd/profiler_python.cpp b/torch/csrc/autograd/profiler_python.cpp
index d3f3b1bd214..4b9232eff76 100644
--- a/torch/csrc/autograd/profiler_python.cpp
+++ b/torch/csrc/autograd/profiler_python.cpp
@@ -77,7 +77,7 @@ PyCodeObject* getCode() {
     return (PyCodeObject*)res;
   }();
   return module_call_code;
-};
+}
 
 template <>
 PyCodeObject* getCode() {
@@ -92,7 +92,7 @@ PyCodeObject* getCode() {
     return (PyCodeObject*)res;
   }();
   return optimizer_step_code;
-};
+}
 
 } // namespace
 } // namespace torch::profiler::impl
@@ -548,7 +548,7 @@ struct TraceKeyCacheState {
 // `PyEval_SetProfile`.
 struct ThreadLocalResults;
 struct TraceContext {
-  PyObject_HEAD;
+  PyObject_HEAD
   ThreadLocalResults* thread_local_results_;
 };
 
@@ -795,7 +795,7 @@ PythonTracer::PythonTracer(torch::profiler::impl::RecordQueue* queue)
     // cannot be round tripped via `sys.settrace(sys.gettrace())`
     PyEval_SetProfile(PythonTracer::pyProfileFn, (PyObject*)ctx);
   }
-};
+}
 
 void PythonTracer::stop() {
   gil_and_restore_thread gil;
diff --git a/torch/csrc/autograd/python_variable.h b/torch/csrc/autograd/python_variable.h
index 32cc5c930ca..82939211eb5 100644
--- a/torch/csrc/autograd/python_variable.h
+++ b/torch/csrc/autograd/python_variable.h
@@ -15,7 +15,7 @@ namespace py = pybind11;
 
 // Python object that backs torch.autograd.Variable
 struct THPVariable {
-  PyObject_HEAD;
+  PyObject_HEAD
   // Payload
   c10::MaybeOwned cdata;
   // Hooks to be run on backwards pass (corresponds to Python attr
diff --git a/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp b/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp
index 54db496ea01..7134715d6a5 100644
--- a/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp
+++ b/torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp
@@ -5,7 +5,7 @@
 namespace torch::distributed::autograd {
 
 CleanupAutogradContextReq::CleanupAutogradContextReq(int64_t context_id)
-    : context_id_(context_id){};
+    : context_id_(context_id) {}
 
 int64_t CleanupAutogradContextReq::getContextId() {
   return context_id_;
diff --git a/torch/csrc/distributed/c10d/DMAConnectivity.cpp b/torch/csrc/distributed/c10d/DMAConnectivity.cpp
index 50c34f62426..3e5efa19049 100644
--- a/torch/csrc/distributed/c10d/DMAConnectivity.cpp
+++ b/torch/csrc/distributed/c10d/DMAConnectivity.cpp
@@ -65,7 +65,7 @@ class DetectorMap {
       cached_;
 };
 
-}; // namespace
+} // namespace
 
 namespace c10d {
diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp
index 58bc7d14bdb..5e7cb1aed7e 100644
--- a/torch/csrc/distributed/c10d/init.cpp
+++ b/torch/csrc/distributed/c10d/init.cpp
@@ -139,7 +139,7 @@ class IntrusivePtrNoGilDestructor {
 
 } // anonymous namespace
 
-PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor, true)
 
 namespace torch::distributed::c10d {
diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp
index bf21bab37ce..e31431ef271 100644
--- a/torch/csrc/distributed/c10d/reducer.cpp
+++ b/torch/csrc/distributed/c10d/reducer.cpp
@@ -43,7 +43,7 @@ C10_DEFINE_TYPED_REGISTRY( // NOLINT
     c10::DeviceType,
     Timer,
     std::unique_ptr,
-    c10::Device);
+    c10::Device)
 
 namespace {
 
@@ -67,7 +67,7 @@ class CpuTimer : public Timer {
   }
 };
 
-C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer);
+C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer)
 
 std::vector extractTensors(const c10::IValue& result) {
   if (result.isPyObject()) {
diff --git a/torch/csrc/distributed/rpc/metrics/registry.cpp b/torch/csrc/distributed/rpc/metrics/registry.cpp
index b787390fda5..c70a5f1a711 100644
--- a/torch/csrc/distributed/rpc/metrics/registry.cpp
+++ b/torch/csrc/distributed/rpc/metrics/registry.cpp
@@ -3,5 +3,5 @@
 namespace torch::distributed::rpc {
 C10_DEFINE_REGISTRY(
     RpcMetricsHandlerRegistry,
-    torch::distributed::rpc::RpcMetricsHandler);
+    torch::distributed::rpc::RpcMetricsHandler)
 } // namespace torch::distributed::rpc
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index f538085b083..c39ac420ced 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -229,7 +229,7 @@ namespace {
 
 typedef std::vector ChecksList;
 
 typedef struct {
-  PyObject_HEAD;
+  PyObject_HEAD
   ChecksList* checks;
 } TensorGuards;
@@ -510,7 +510,7 @@ static PyTypeObject TensorGuardsType = { PyVarObject_HEAD_INIT(nullptr, 0)
 // merged.
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct GlobalStateGuard {
-  PyObject_HEAD;
+  PyObject_HEAD
 
   inline void init() {
     auto& ctx = at::globalContext();
diff --git a/torch/csrc/dynamo/init.cpp b/torch/csrc/dynamo/init.cpp
index 16a3f1e2c97..ce8f5db1c0a 100644
--- a/torch/csrc/dynamo/init.cpp
+++ b/torch/csrc/dynamo/init.cpp
@@ -15,7 +15,7 @@
 static struct PyModuleDef _module =
     {PyModuleDef_HEAD_INIT, "torch._C._dynamo", "", -1, nullptr};
 
-PYBIND11_MAKE_OPAQUE(std::vector);
+PYBIND11_MAKE_OPAQUE(std::vector)
 
 namespace torch::dynamo {
diff --git a/torch/csrc/jit/api/function_impl.cpp b/torch/csrc/jit/api/function_impl.cpp
index b6aa3703a52..820ecef66a8 100644
--- a/torch/csrc/jit/api/function_impl.cpp
+++ b/torch/csrc/jit/api/function_impl.cpp
@@ -13,6 +13,7 @@
 #include
 #endif
 
+// clang-format off
 C10_DEFINE_bool(
     torch_jit_do_not_store_optimized_graph,
     false,
diff --git a/torch/csrc/jit/passes/dtype_analysis.cpp b/torch/csrc/jit/passes/dtype_analysis.cpp
index 3aee83379bd..9cbe6a93623 100644
--- a/torch/csrc/jit/passes/dtype_analysis.cpp
+++ b/torch/csrc/jit/passes/dtype_analysis.cpp
@@ -61,7 +61,7 @@ std::unique_ptr MTensorArgumentCreator(Node* n) {
     }
   }
   return stack;
-};
+}
 
 bool MTensorNodeArgValid(Value* value) {
   auto tensor_type = value->type()->cast();
diff --git a/torch/csrc/jit/passes/onnx/naming.cpp b/torch/csrc/jit/passes/onnx/naming.cpp
index 432c5efd1e5..692d60a2d3d 100644
--- a/torch/csrc/jit/passes/onnx/naming.cpp
+++ b/torch/csrc/jit/passes/onnx/naming.cpp
@@ -79,7 +79,7 @@ namespace {
 
 class NodeNameGenerator {
  public:
-  NodeNameGenerator(std::shared_ptr g) : graph_(std::move(g)){};
+  NodeNameGenerator(std::shared_ptr g) : graph_(std::move(g)) {}
   virtual ~NodeNameGenerator() = 0;
   void PopulateNodeNames();
@@ -105,7 +105,7 @@ NodeNameGenerator::~NodeNameGenerator() = default;
 class ScopedNodeNameGenerator : public NodeNameGenerator {
  public:
   ScopedNodeNameGenerator(std::shared_ptr g)
-      : NodeNameGenerator(std::move(g)){};
+      : NodeNameGenerator(std::move(g)) {}
 
  protected:
   void CreateNodeName(Node* n) override;
diff --git a/torch/csrc/jit/passes/symbolic_shape_cache.cpp b/torch/csrc/jit/passes/symbolic_shape_cache.cpp
index 6a265a943d5..0cca03d6f74 100644
--- a/torch/csrc/jit/passes/symbolic_shape_cache.cpp
+++ b/torch/csrc/jit/passes/symbolic_shape_cache.cpp
@@ -205,5 +205,5 @@ bool operator==(
     const CanonicalizedSymbolicShape& a,
     const CanonicalizedSymbolicShape& b) {
   return a.values_ == b.values_;
-};
+}
 } // namespace torch::jit
diff --git a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp
index b82e5594511..723f3c9cf75 100644
--- a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp
+++ b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp
@@ -154,7 +154,7 @@ static std::vector summarizeInputStrides(const TensorType& tt) {
         summarizeStrideDim(sizes, strides, dim, stride_inputs, 0));
   }
   return stride_inputs;
-};
+}
 
 // Todo: incorporate in codegen
 static StrideInput summarizeOutputStrides(const TensorType& tt) {
diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
index 9162bef4258..01ca666eb66 100644
--- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp
+++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
@@ -26,6 +26,7 @@
 
 #include
 
+// clang-format off
 C10_DEFINE_bool(
     torch_jit_disable_cat,
     false,
diff --git a/torch/csrc/jit/python/pybind.h b/torch/csrc/jit/python/pybind.h
index eb9b59d08d8..5bab3878f3b 100644
--- a/torch/csrc/jit/python/pybind.h
+++ b/torch/csrc/jit/python/pybind.h
@@ -65,7 +65,7 @@ class unwrapping_shared_ptr {
 
 } // namespace torch::jit
 
-PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr, true)
 
 namespace pybind11::detail {
diff --git a/torch/csrc/jit/python/python_dict.h b/torch/csrc/jit/python/python_dict.h
index c8433a7df6c..5e8fdbfe9a0 100644
--- a/torch/csrc/jit/python/python_dict.h
+++ b/torch/csrc/jit/python/python_dict.h
@@ -98,12 +98,12 @@ class ScriptDict final {
   // not exist.
   at::IValue getItem(const at::IValue& key) {
     return dict_.at(key);
-  };
+  }
 
   // Set the value for the given key.
   void setItem(const at::IValue& key, const at::IValue& value) {
     dict_.insert_or_assign(key, value);
-  };
+  }
 
   // Check whether the dictionary contains the given key.
   bool contains(const at::IValue& key) {
diff --git a/torch/csrc/jit/python/python_list.h b/torch/csrc/jit/python/python_list.h
index 783e429946f..83955a9f3d5 100644
--- a/torch/csrc/jit/python/python_list.h
+++ b/torch/csrc/jit/python/python_list.h
@@ -92,7 +92,7 @@ class ScriptList final {
   at::IValue getItem(diff_type idx) {
     idx = wrap_index(idx);
     return list_.get(idx);
-  };
+  }
 
   // Set the value corresponding to the given index.
   void setItem(diff_type idx, const at::IValue& value) {
diff --git a/torch/csrc/jit/python/python_sugared_value.h b/torch/csrc/jit/python/python_sugared_value.h
index 314b00bcf38..15cc2445fd5 100644
--- a/torch/csrc/jit/python/python_sugared_value.h
+++ b/torch/csrc/jit/python/python_sugared_value.h
@@ -127,7 +127,7 @@ struct VISIBILITY_HIDDEN ConstantParameterList : public SugaredValue {
 
 struct VISIBILITY_HIDDEN ModuleDictMethod : public SugaredValue {
   explicit ModuleDictMethod(SugaredValuePtr iterable, std::string name)
-      : iterable_(std::move(iterable)), name_(std::move(name)){};
+      : iterable_(std::move(iterable)), name_(std::move(name)) {}
 
   std::string kind() const override {
     return name_;
@@ -286,7 +286,7 @@ struct VISIBILITY_HIDDEN SugaredDict : public SugaredValue {
 
   SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override {
     return keys_;
-  };
+  }
 
   std::shared_ptr self_;
   std::shared_ptr keys_;
diff --git a/torch/csrc/jit/runtime/argument_spec.cpp b/torch/csrc/jit/runtime/argument_spec.cpp
index f8c99f40290..48e45ab7a3a 100644
--- a/torch/csrc/jit/runtime/argument_spec.cpp
+++ b/torch/csrc/jit/runtime/argument_spec.cpp
@@ -66,7 +66,7 @@ void ArgumentSpecCreator::scan(
   } else {
     instructions_.emplace_back(SKIP);
   }
-};
+}
 
 // this is a coarse-grained guarantee that the slots of a class will not be
 // modified by the function. It works fine for things that used be read-only
diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp
index 7b948911df7..8f89584fb7c 100644
--- a/torch/csrc/jit/runtime/graph_executor.cpp
+++ b/torch/csrc/jit/runtime/graph_executor.cpp
@@ -53,6 +53,7 @@
 #include
 #include
 
+// clang-format off
 C10_DEFINE_bool(
     torch_jit_execution_plan_reuse_code_graph,
     false,
diff --git a/torch/csrc/jit/runtime/interpreter.cpp b/torch/csrc/jit/runtime/interpreter.cpp
index b3545a3666e..d42c4c69b6f 100644
--- a/torch/csrc/jit/runtime/interpreter.cpp
+++ b/torch/csrc/jit/runtime/interpreter.cpp
@@ -46,6 +46,7 @@ using torch::distributed::autograd::DistAutogradContainer;
 #include
 #include
 
+// clang-format off
 C10_DEFINE_bool(
     torch_jit_enable_rethrow_caught_exception,
     false,
diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
index af9deaf476c..98acf24dd1d 100644
--- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
+++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
@@ -38,6 +38,7 @@
 #include
 #include
 
+// clang-format off
 C10_DEFINE_bool(
     torch_jit_enable_new_executor,
     true,
diff --git a/torch/csrc/jit/runtime/static/impl.cpp b/torch/csrc/jit/runtime/static/impl.cpp
index a503d6112ac..8eef32b9c95 100644
--- a/torch/csrc/jit/runtime/static/impl.cpp
+++ b/torch/csrc/jit/runtime/static/impl.cpp
@@ -47,6 +47,7 @@
 #endif
 
 // used in test only
+// clang-format off
 C10_DEFINE_bool(
     static_runtime_disable_debug_memory_overlap_check,
     false,
diff --git a/torch/csrc/jit/runtime/static/ops.cpp b/torch/csrc/jit/runtime/static/ops.cpp
index a3e01e307f5..de41be15889 100644
--- a/torch/csrc/jit/runtime/static/ops.cpp
+++ b/torch/csrc/jit/runtime/static/ops.cpp
@@ -40,6 +40,7 @@
 
 #include
 
+// clang-format off
 C10_DEFINE_bool(
     static_runtime_enable_fast_math,
     true,
diff --git a/torch/csrc/jit/runtime/static/passes.cpp b/torch/csrc/jit/runtime/static/passes.cpp
index 8214c374ddf..0632970e1ca 100644
--- a/torch/csrc/jit/runtime/static/passes.cpp
+++ b/torch/csrc/jit/runtime/static/passes.cpp
@@ -9,6 +9,7 @@
 #include
 #include
 
+// clang-format off
 C10_DEFINE_bool(
     enable_clip_ranges_gather_fusions,
     true,
diff --git a/torch/csrc/jit/testing/file_check.cpp b/torch/csrc/jit/testing/file_check.cpp
index ba36ab2bb9c..c17a84ac191 100644
--- a/torch/csrc/jit/testing/file_check.cpp
+++ b/torch/csrc/jit/testing/file_check.cpp
@@ -85,7 +85,7 @@ std::ostream& operator<<(std::ostream& out, const Check& c) {
   }
   out << ": " << c.search_str_;
   return out;
-};
+}
 
 namespace {
diff --git a/torch/csrc/lazy/core/config.cpp b/torch/csrc/lazy/core/config.cpp
index ce6ce27ca17..00bf247cabd 100644
--- a/torch/csrc/lazy/core/config.cpp
+++ b/torch/csrc/lazy/core/config.cpp
@@ -1,7 +1,7 @@
 #include
 #include
 
-C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging");
+C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging")
 
 C10_DEFINE_bool(
     torch_lazy_param_aliasing,
diff --git a/torch/csrc/lazy/core/ir.cpp b/torch/csrc/lazy/core/ir.cpp
index 6bb2c0ba0e9..8dac3e563cb 100644
--- a/torch/csrc/lazy/core/ir.cpp
+++ b/torch/csrc/lazy/core/ir.cpp
@@ -7,6 +7,7 @@
 
 // Enables caching on for dynamic shapes (aka disable hash on shapes)
 // NOLINTNEXTLINE(misc-use-internal-linkage)
+// clang-format off
 C10_DEFINE_bool(
     ltc_enable_dynamic_shapes,
     false,
diff --git a/torch/csrc/lazy/core/shape.cpp b/torch/csrc/lazy/core/shape.cpp
index 359fb7a9665..f59684d4ffc 100644
--- a/torch/csrc/lazy/core/shape.cpp
+++ b/torch/csrc/lazy/core/shape.cpp
@@ -9,7 +9,7 @@
 C10_DEFINE_bool(
     ltc_enable_symbolic_shapes,
     false,
-    "Enables calculation of if dims are symbolic");
+    "Enables calculation of if dims are symbolic")
 
 namespace torch::lazy {
diff --git a/torch/csrc/lazy/ts_backend/config.cpp b/torch/csrc/lazy/ts_backend/config.cpp
index ee2ce424262..ec098d4dc6e 100644
--- a/torch/csrc/lazy/ts_backend/config.cpp
+++ b/torch/csrc/lazy/ts_backend/config.cpp
@@ -5,7 +5,7 @@
 C10_DEFINE_bool(
     torch_lazy_ts_tensor_update_sync,
     true,
-    "Use synchronous copy inside _copy_from op");
+    "Use synchronous copy inside _copy_from op")
 
 // TODO(whc) we need to hook up these flags in a more useful way
 // possibly also keep LTC_TS_CUDA env working?
@@ -13,4 +13,4 @@ C10_DEFINE_bool(
 C10_DEFINE_bool(
     torch_lazy_ts_cuda,
     false,
-    "Use cuda device for torchscript backend (instead of CPU)");
+    "Use cuda device for torchscript backend (instead of CPU)")
diff --git a/torch/csrc/profiler/python/init.h b/torch/csrc/profiler/python/init.h
index 28fae14988a..a14cc213e8f 100644
--- a/torch/csrc/profiler/python/init.h
+++ b/torch/csrc/profiler/python/init.h
@@ -12,12 +12,12 @@ using torch::profiler::impl::TensorID;
   template <>                                  \
   struct type_caster : public strong_pointer_type_caster {};
 
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf);
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf)
 #undef STRONG_POINTER_TYPE_CASTER
 
 template <>
diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp
index d26f8c2ee1d..c2825f7d945 100644
--- a/torch/csrc/utils/invalid_arguments.cpp
+++ b/torch/csrc/utils/invalid_arguments.cpp
@@ -27,7 +27,7 @@ struct Type {
 };
 
 struct SimpleType : public Type {
-  SimpleType(std::string& name) : name(name){};
+  SimpleType(std::string& name) : name(name) {}
 
   bool is_matching(PyObject* object) override {
     return py_typename(object) == name;
@@ -38,7 +38,7 @@ struct MultiType : public Type {
   MultiType(std::initializer_list accepted_types)
-      : types(accepted_types){};
+      : types(accepted_types) {}
 
   bool is_matching(PyObject* object) override {
     auto it = std::find(types.begin(), types.end(), py_typename(object));
@@ -49,7 +49,7 @@ struct NullableType : public Type {
-  NullableType(std::unique_ptr type) : type(std::move(type)){};
+  NullableType(std::unique_ptr type) : type(std::move(type)) {}
 
   bool is_matching(PyObject* object) override {
     return object == Py_None || type->is_matching(object);
@@ -60,7 +60,7 @@ struct TupleType : public Type {
   TupleType(std::vector> types)
-      : types(std::move(types)){};
+      : types(std::move(types)) {}
 
   bool is_matching(PyObject* object) override {
     if (!PyTuple_Check(object))
@@ -79,7 +79,7 @@ struct SequenceType : public Type {
-  SequenceType(std::unique_ptr type) : type(std::move(type)){};
+  SequenceType(std::unique_ptr type) : type(std::move(type)) {}
 
   bool is_matching(PyObject* object) override {
     if (!PySequence_Check(object))
@@ -99,7 +99,7 @@ struct Argument {
   Argument(std::string name, std::unique_ptr type)
-      : name(std::move(name)), type(std::move(type)){};
+      : name(std::move(name)), type(std::move(type)) {}
 
   std::string name;
   std::unique_ptr type;
@@ -109,9 +109,9 @@ struct Option {
   Option(std::vector arguments, bool is_variadic, bool has_out)
       : arguments(std::move(arguments)),
         is_variadic(is_variadic),
-        has_out(has_out){};
+        has_out(has_out) {}
   Option(bool is_variadic, bool has_out)
-      : arguments(), is_variadic(is_variadic), has_out(has_out){};
+      : arguments(), is_variadic(is_variadic), has_out(has_out) {}
   Option(const Option&) = delete;
   Option(Option&& other) noexcept = default;
   Option& operator=(const Option&) = delete;
diff --git a/torch/csrc/utils/object_ptr.h b/torch/csrc/utils/object_ptr.h
index 70a887860ff..983a7a2ae07 100644
--- a/torch/csrc/utils/object_ptr.h
+++ b/torch/csrc/utils/object_ptr.h
@@ -7,15 +7,15 @@
 template
 class TORCH_PYTHON_API THPPointer {
  public:
-  THPPointer() : ptr(nullptr){};
-  explicit THPPointer(T* ptr) noexcept : ptr(ptr){};
+  THPPointer() : ptr(nullptr) {}
+  explicit THPPointer(T* ptr) noexcept : ptr(ptr) {}
   THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}
 
   THPPointer(const THPPointer& p) = delete;
   THPPointer& operator=(const THPPointer&) = delete;
   ~THPPointer() {
     free();
-  };
+  }
   T* get() {
     return ptr;
   }
diff --git a/torch/csrc/utils/pybind.h b/torch/csrc/utils/pybind.h
index 3c0f32864a0..a22a08cc222 100644
--- a/torch/csrc/utils/pybind.h
+++ b/torch/csrc/utils/pybind.h
@@ -24,10 +24,10 @@ namespace py = pybind11;
 
 // This makes intrusive_ptr to be available as a custom pybind11 holder type,
 // see
 // https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers
-PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr, true)
 
-PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr);
-PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr)
+PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr, true)
 
 namespace pybind11::detail {
diff --git a/torch/csrc/utils/python_symnode.h b/torch/csrc/utils/python_symnode.h
index 5a1f43d1bc8..43ef85ad8fc 100644
--- a/torch/csrc/utils/python_symnode.h
+++ b/torch/csrc/utils/python_symnode.h
@@ -35,7 +35,7 @@ class PythonSymNodeImpl : public c10::SymNodeImpl {
   PythonSymNodeImpl(py::object pyobj) : c10::SymNodeImpl() {
     pyobj_ = std::make_shared(
         pyobj.release().ptr(), getPyInterpreter());
-  };
+  }
 
   c10::SymNode wrap_int(int64_t num) override {
     py::gil_scoped_acquire acquire;