[2/N] Change static functions in headers to inline (#127764)

Follows #127727

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127764
Approved by: https://github.com/Skylion007
This commit is contained in:
cyy 2024-06-04 00:49:01 +00:00 committed by PyTorch MergeBot
parent dbf39a6e63
commit 05fa05cbae
10 changed files with 23 additions and 30 deletions

View File

@ -142,7 +142,7 @@ struct C10_API SourceLocation {
std::ostream& operator<<(std::ostream& out, const SourceLocation& loc);
// unix isprint but insensitive to locale
-inline static bool isPrint(char s) {
+inline bool isPrint(char s) {
return s > 0x1f && s < 0x7f;
}

View File

@ -16,7 +16,7 @@ namespace c10 {
/// Returns false since we cannot have x < 0 if x is unsigned.
template <typename T>
-static inline constexpr bool is_negative(
+inline constexpr bool is_negative(
const T& /*x*/,
std::true_type /*is_unsigned*/) {
return false;
@ -24,9 +24,7 @@ static inline constexpr bool is_negative(
/// Returns true if a signed variable x < 0
template <typename T>
-static inline constexpr bool is_negative(
-    const T& x,
-    std::false_type /*is_unsigned*/) {
+inline constexpr bool is_negative(const T& x, std::false_type /*is_unsigned*/) {
return x < T(0);
}
@ -42,15 +40,13 @@ inline constexpr bool is_negative(const T& x) {
/// Returns the sign of an unsigned variable x as 0, 1
template <typename T>
-static inline constexpr int signum(const T& x, std::true_type /*is_unsigned*/) {
+inline constexpr int signum(const T& x, std::true_type /*is_unsigned*/) {
return T(0) < x;
}
/// Returns the sign of a signed variable x as -1, 0, 1
template <typename T>
-static inline constexpr int signum(
-    const T& x,
-    std::false_type /*is_unsigned*/) {
+inline constexpr int signum(const T& x, std::false_type /*is_unsigned*/) {
return (T(0) < x) - (x < T(0));
}
@ -92,7 +88,7 @@ inline constexpr bool greater_than_max(const T& x) {
/// Returns true if x < lowest(Limit). Standard comparison
template <typename Limit, typename T>
-static inline constexpr bool less_than_lowest(
+inline constexpr bool less_than_lowest(
const T& x,
std::false_type /*limit_is_unsigned*/,
std::false_type /*x_is_unsigned*/) {
@ -102,7 +98,7 @@ static inline constexpr bool less_than_lowest(
/// Returns false since all the limit is signed and therefore includes
/// negative values but x cannot be negative because it is unsigned
template <typename Limit, typename T>
-static inline constexpr bool less_than_lowest(
+inline constexpr bool less_than_lowest(
const T& /*x*/,
std::false_type /*limit_is_unsigned*/,
std::true_type /*x_is_unsigned*/) {
@ -112,7 +108,7 @@ static inline constexpr bool less_than_lowest(
/// Returns true if x < 0, where 0 is constructed from T.
/// Limit is not signed, so its lower value is zero
template <typename Limit, typename T>
-static inline constexpr bool less_than_lowest(
+inline constexpr bool less_than_lowest(
const T& x,
std::true_type /*limit_is_unsigned*/,
std::false_type /*x_is_unsigned*/) {
@ -121,7 +117,7 @@ static inline constexpr bool less_than_lowest(
/// Returns false sign both types are unsigned
template <typename Limit, typename T>
-static inline constexpr bool less_than_lowest(
+inline constexpr bool less_than_lowest(
const T& /*x*/,
std::true_type /*limit_is_unsigned*/,
std::true_type /*x_is_unsigned*/) {

View File

@ -19,7 +19,7 @@
#include <torch/csrc/distributed/c10d/exception.h>
#endif
-static inline void PyErr_SetString(PyObject* type, const std::string& message) {
+inline void PyErr_SetString(PyObject* type, const std::string& message) {
PyErr_SetString(type, message.c_str());
}
/// NOTE [ Conversion Cpp Python Warning ]

View File

@ -23,11 +23,11 @@ TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage(
bool allow_preexisting_pyobj = false);
extern PyTypeObject* THPStorageClass;
-static inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) {
+inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) {
return tp == THPStorageClass;
}
-static inline bool THPStorage_CheckExact(PyObject* obj) {
+inline bool THPStorage_CheckExact(PyObject* obj) {
return THPStorage_CheckTypeExact(Py_TYPE(obj));
}

View File

@ -39,7 +39,7 @@ TORCH_PYTHON_API extern PyObject* ParameterClass;
bool THPVariable_initModule(PyObject* module);
TORCH_PYTHON_API PyObject* THPVariable_Wrap(at::TensorBase var);
-static inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
+inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
// Check that a python object is a `Tensor`, but not a `Tensor` subclass.
// (A subclass could have different semantics.) The one exception is
// Parameter, which is used for Python bookkeeping but is equivalent to
@ -49,7 +49,7 @@ static inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
tp == (PyTypeObject*)ParameterClass);
}
-static inline bool THPVariable_CheckExact(PyObject* obj) {
+inline bool THPVariable_CheckExact(PyObject* obj) {
return THPVariable_CheckTypeExact(Py_TYPE(obj));
}

View File

@ -15,7 +15,7 @@ struct UnpackedSlice {
};
// This mirrors Cpython's PySlice_Unpack method
-static inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
+inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
PySliceObject* r = (PySliceObject*)_r;
/* this is harder to get right than you might think */

View File

@ -88,7 +88,7 @@ namespace detail {
TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status);
-static inline void NCCL_CHECK(ncclResult status) {
+inline void NCCL_CHECK(ncclResult status) {
if (status != ncclResult::Success) {
throw_nccl_error(status);
}

View File

@ -1,9 +1,8 @@
#pragma once
-namespace torch {
-namespace utils {
+namespace torch::utils {
-static inline bool cuda_enabled() {
+inline constexpr bool cuda_enabled() {
#ifdef USE_CUDA
return true;
#else
@ -11,5 +10,4 @@ static inline bool cuda_enabled() {
#endif
}
-} // namespace utils
-} // namespace torch
+} // namespace torch::utils

View File

@ -26,21 +26,21 @@ namespace torch::utils {
void device_lazy_init(at::DeviceType device_type);
void set_requires_device_init(at::DeviceType device_type, bool value);
-static inline void maybe_initialize_device(at::Device& device) {
+inline void maybe_initialize_device(at::Device& device) {
// Add more devices here to enable lazy initialization.
if (device.is_cuda() || device.is_xpu() || device.is_privateuseone()) {
device_lazy_init(device.type());
}
}
-static inline void maybe_initialize_device(std::optional<at::Device>& device) {
+inline void maybe_initialize_device(std::optional<at::Device>& device) {
if (!device.has_value()) {
return;
}
maybe_initialize_device(device.value());
}
-static inline void maybe_initialize_device(const at::TensorOptions& options) {
+inline void maybe_initialize_device(const at::TensorOptions& options) {
auto device = options.device();
maybe_initialize_device(device);
}

View File

@ -100,8 +100,7 @@ inline void THPUtils_internStringInPlace(PyObject** obj) {
*
*/
// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static py::object PyObject_FastGetAttrString(PyObject* obj, const char* name) {
+inline py::object PyObject_FastGetAttrString(PyObject* obj, const char* name) {
PyTypeObject* tp = Py_TYPE(obj);
PyObject* res = (PyObject*)nullptr;