Move from/to to torch::stable::detail (#164956)

To avoid polluting the global namespace, this PR moves the `from`/`to` APIs into `torch::stable::detail`. Following our normal deprecation cycle, we keep exposing the global `from`/`to` for the time being; otherwise, people who onboarded their extensions onto 2.9 would not be able to build with 2.10.
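For a custom op author who does opt in, the change is purely a namespace qualification at each conversion site. Below is a minimal sketch of an ABI-stable call site written with the new spelling, modeled on the call sites updated in this PR; the include list and the `aten::abs` example are illustrative rather than prescriptive, so adjust them to whatever your extension already pulls in.

```cpp
#include <array>

#include <torch/csrc/stable/library.h>
#include <torch/csrc/stable/stableivalue_conversions.h>
#include <torch/csrc/stable/tensor.h>

// Boxed call into the dispatcher over the stable ABI: convert the input to a
// StableIValue, invoke the op, convert the result back.
torch::stable::Tensor my_abs(const torch::stable::Tensor& self) {
  std::array<StableIValue, 1> stack{torch::stable::detail::from(self)};
  TORCH_ERROR_CODE_CHECK(
      aoti_torch_call_dispatcher("aten::abs", "", stack.data()));
  // The old global spelling to<torch::stable::Tensor>(stack[0]) still
  // compiles during the deprecation window, but is now marked deprecated.
  return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
}
```

The only difference from the pre-2.10 version of this snippet is the `torch::stable::detail::` qualification on `from` and `to`.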

Note that this means that within libtorch we do not get the luxury of tacking on a `using torch::stable::detail::from`: with both the global and the namespaced APIs visible, calls become ambiguous at build time because the compiler cannot tell which one is meant. That is why every local call site is updated to the fully qualified spelling.
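To make the ambiguity concrete, here is a rough, self-contained illustration with simplified stand-in signatures (not the real headers); the snippet is expected to fail to build, which is exactly the problem:

```cpp
#include <cstdint>

using StableIValue = uint64_t;  // stand-in for the real type

// The global shim kept around for backwards compatibility.
template <typename T>
StableIValue from(T val) {
  return StableIValue{};
}

namespace torch::stable::detail {
// The new namespaced API.
template <typename T>
StableIValue from(T val) {
  return StableIValue{};
}
}  // namespace torch::stable::detail

// The tempting shortcut inside libtorch: pull the namespaced template into
// the scope where the global shim already lives...
using torch::stable::detail::from;

StableIValue demo(int64_t x) {
  // ...but now two identical `from` declarations are visible here, and the
  // compiler rejects the program (at the using-declaration or at this call,
  // depending on the compiler).
  return from(x);
}
```

Fully qualifying every call site sidesteps this, at the cost of the verbosity visible throughout the diff below.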

Note that the update is _not_ necessary from a custom op writer's point of view: FA3 can continue to build on torch nightlies without changing any code. (Since this is a header change, the PR has no runtime implications; a previously built ABI-stable FA3 wheel will continue to work with newer torch versions after this PR.)

Once TORCH_BOX lands, we will be free to remove these global APIs when the deprecation cycle is up (April 2026) and to encourage people to use TORCH_BOX and avoid `from`/`to` entirely.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164956
Approved by: https://github.com/malfet
ghstack dependencies: #164882
Jane Xu 2025-10-21 00:30:13 +00:00 committed by PyTorch MergeBot
parent 0be0de4ffa
commit fe69a2bbbd
5 changed files with 107 additions and 60 deletions


@@ -1240,18 +1240,18 @@ class TestCppExtensionJIT(common.TestCase):
at::Tensor my_abs(at::Tensor x) {
StableIValue stack[1];
RAIIATH raii(torch::aot_inductor::new_tensor_handle(std::move(x)));
stack[0] = from(raii.release());
stack[0] = torch::stable::detail::from(raii.release());
aoti_torch_call_dispatcher("aten::abs", "", stack);
RAIIATH res(to<AtenTensorHandle>(stack[0]));
RAIIATH res(torch::stable::detail::to<AtenTensorHandle>(stack[0]));
return *reinterpret_cast<at::Tensor*>(res.release());
}
at::Tensor my_floor(at::Tensor x) {
StableIValue stack[1];
RAIIATH raii(torch::aot_inductor::new_tensor_handle(std::move(x)));
stack[0] = from(raii.release());
stack[0] = torch::stable::detail::from(raii.release());
aoti_torch_call_dispatcher("aten::floor", "", stack);
RAIIATH res(to<AtenTensorHandle>(stack[0]));
RAIIATH res(torch::stable::detail::to<AtenTensorHandle>(stack[0]));
return *reinterpret_cast<at::Tensor*>(res.release());
}
"""


@@ -1413,28 +1413,28 @@ static StableIValue from_ivalue(
case c10::TypeKind::TensorType: {
AtenTensorHandle ath = torch::aot_inductor::new_tensor_handle(
std::move(const_cast<at::Tensor&>(ivalue.toTensor())));
return from(ath);
return torch::stable::detail::from(ath);
}
case c10::TypeKind::IntType: {
return from(ivalue.toInt());
return torch::stable::detail::from(ivalue.toInt());
}
case c10::TypeKind::FloatType: {
return from(ivalue.toDouble());
return torch::stable::detail::from(ivalue.toDouble());
}
case c10::TypeKind::BoolType: {
return from(ivalue.toBool());
return torch::stable::detail::from(ivalue.toBool());
}
case c10::TypeKind::ScalarTypeType: {
return from(ivalue.toScalarType());
return torch::stable::detail::from(ivalue.toScalarType());
}
case c10::TypeKind::DeviceObjType: {
return from(ivalue.toDevice());
return torch::stable::detail::from(ivalue.toDevice());
}
case c10::TypeKind::LayoutType: {
return from(ivalue.toLayout());
return torch::stable::detail::from(ivalue.toLayout());
}
case c10::TypeKind::MemoryFormatType: {
return from(ivalue.toMemoryFormat());
return torch::stable::detail::from(ivalue.toMemoryFormat());
}
case c10::TypeKind::OptionalType: {
auto inner_type = type->castRaw<at::OptionalType>()->getElementType();
@@ -1444,17 +1444,18 @@ static StableIValue from_ivalue(
// able to follow the patterned semantic of every other case here in one
// line:
//
// return from<std::optional<inner_type::t>>(ivalue.toInnerTypeT()));
// return
// torch::stable::detail::from<std::optional<inner_type::t>>(ivalue.toInnerTypeT()));
//
// BUT we do NOT have that type inner_type::t readily available, so we
// will manually unwrap and recursively call. This implementation MUST
// be kept in sync with from<std::optional<T>> function in
// torch/csrc/stable/library.h
// be kept in sync with torch::stable::detail::from<std::optional<T>>
// function in torch/csrc/stable/stableivalue_conversions.h
if (ivalue.isNone()) {
return from(std::nullopt);
return torch::stable::detail::from(std::nullopt);
}
StableIValue* sivp = new StableIValue(from_ivalue(inner_type, ivalue));
return from(sivp);
return torch::stable::detail::from(sivp);
}
default: {
TORCH_CHECK(
@@ -1471,30 +1472,32 @@ static c10::IValue to_ivalue(
switch (type->kind()) {
case c10::TypeKind::TensorType: {
auto ret_raiiath = torch::aot_inductor::RAIIAtenTensorHandle(
to<AtenTensorHandle>(stable_ivalue));
torch::stable::detail::to<AtenTensorHandle>(stable_ivalue));
return (c10::IValue(*torch::aot_inductor::tensor_handle_to_tensor_pointer(
ret_raiiath.get())));
}
case c10::TypeKind::IntType: {
return c10::IValue(to<int64_t>(stable_ivalue));
return c10::IValue(torch::stable::detail::to<int64_t>(stable_ivalue));
}
case c10::TypeKind::FloatType: {
return c10::IValue(to<double>(stable_ivalue));
return c10::IValue(torch::stable::detail::to<double>(stable_ivalue));
}
case c10::TypeKind::BoolType: {
return c10::IValue(to<bool>(stable_ivalue));
return c10::IValue(torch::stable::detail::to<bool>(stable_ivalue));
}
case c10::TypeKind::ScalarTypeType: {
return c10::IValue(to<c10::ScalarType>(stable_ivalue));
return c10::IValue(
torch::stable::detail::to<c10::ScalarType>(stable_ivalue));
}
case c10::TypeKind::DeviceObjType: {
return c10::IValue(to<c10::Device>(stable_ivalue));
return c10::IValue(torch::stable::detail::to<c10::Device>(stable_ivalue));
}
case c10::TypeKind::LayoutType: {
return c10::IValue(to<c10::Layout>(stable_ivalue));
return c10::IValue(torch::stable::detail::to<c10::Layout>(stable_ivalue));
}
case c10::TypeKind::MemoryFormatType: {
return c10::IValue(to<c10::MemoryFormat>(stable_ivalue));
return c10::IValue(
torch::stable::detail::to<c10::MemoryFormat>(stable_ivalue));
}
case c10::TypeKind::OptionalType: {
auto inner_type = type->castRaw<at::OptionalType>()->getElementType();
@@ -1504,16 +1507,17 @@ static c10::IValue to_ivalue(
// able to follow the patterned semantic of every other case here in one
// line:
//
// return c10::IValue(to<std::optional<inner_type::t>>(stable_ivalue));
// return
// c10::IValue(torch::stable::detail::to<std::optional<inner_type::t>>(stable_ivalue));
//
// BUT we do NOT have that type inner_type::t readily available, so we
// will manually unwrap and recursively call. This implementation MUST
// be kept in sync with the to<T> function in
// torch/csrc/stable/library.h
if (stable_ivalue == from(std::nullopt)) {
// be kept in sync with the torch::stable::detail::to<T> function in
// torch/csrc/stable/stableivalue_conversions.h
if (stable_ivalue == torch::stable::detail::from(std::nullopt)) {
return c10::IValue();
}
auto sivp = to<StableIValue*>(stable_ivalue);
auto sivp = torch::stable::detail::to<StableIValue*>(stable_ivalue);
auto ival = to_ivalue(inner_type, *sivp);
delete sivp;
return ival;


@@ -18,15 +18,15 @@ namespace torch::stable {
inline torch::stable::Tensor empty_like(const torch::stable::Tensor& self) {
const auto num_args = 6;
std::array<StableIValue, num_args> stack{
from(self),
from(std::nullopt),
from(std::nullopt),
from(std::nullopt),
from(std::nullopt),
from(std::nullopt)};
torch::stable::detail::from(self),
torch::stable::detail::from(std::nullopt),
torch::stable::detail::from(std::nullopt),
torch::stable::detail::from(std::nullopt),
torch::stable::detail::from(std::nullopt),
torch::stable::detail::from(std::nullopt)};
TORCH_ERROR_CODE_CHECK(
aoti_torch_call_dispatcher("aten::empty_like", "", stack.data()));
return to<torch::stable::Tensor>(stack[0]);
return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
}
// We expect this to be the stable version of the fill_.Scalar op
@@ -71,7 +71,8 @@ inline torch::stable::Tensor new_empty(
int32_t target_dtype;
if (dtype.has_value()) {
target_dtype = to<int32_t>(from(dtype.value()));
target_dtype = torch::stable::detail::to<int32_t>(
torch::stable::detail::from(dtype.value()));
} else {
TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(self.get(), &target_dtype));
}
@@ -109,7 +110,8 @@ inline torch::stable::Tensor new_zeros(
int32_t target_dtype;
if (dtype.has_value()) {
target_dtype = to<int32_t>(from(dtype.value()));
target_dtype = torch::stable::detail::to<int32_t>(
torch::stable::detail::from(dtype.value()));
} else {
TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(self.get(), &target_dtype));
}
@@ -194,10 +196,13 @@ inline torch::stable::Tensor transpose(
int64_t dim0,
int64_t dim1) {
const auto num_args = 3;
std::array<StableIValue, num_args> stack{from(self), from(dim0), from(dim1)};
std::array<StableIValue, num_args> stack{
torch::stable::detail::from(self),
torch::stable::detail::from(dim0),
torch::stable::detail::from(dim1)};
TORCH_ERROR_CODE_CHECK(
aoti_torch_call_dispatcher("aten::transpose", "int", stack.data()));
return to<torch::stable::Tensor>(stack[0]);
return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
}
// We expect this to be the stable version of the zero_ op with identical
@@ -205,10 +210,10 @@ inline torch::stable::Tensor transpose(
// a tensor method but only as a function i.e. zero_(t) not t.zero_()).
inline torch::stable::Tensor zero_(torch::stable::Tensor& self) {
const auto num_args = 1;
std::array<StableIValue, num_args> stack{from(self)};
std::array<StableIValue, num_args> stack{torch::stable::detail::from(self)};
TORCH_ERROR_CODE_CHECK(
aoti_torch_call_dispatcher("aten::zero_", "", stack.data()));
return to<torch::stable::Tensor>(stack[0]);
return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
}
// We expect this to be the stable version of the copy_ op with
@@ -219,20 +224,24 @@ inline torch::stable::Tensor copy_(
std::optional<bool> non_blocking = std::nullopt) {
const auto num_args = 3;
std::array<StableIValue, num_args> stack{
from(self), from(src), from(non_blocking.value_or(false))};
torch::stable::detail::from(self),
torch::stable::detail::from(src),
torch::stable::detail::from(non_blocking.value_or(false))};
TORCH_ERROR_CODE_CHECK(
aoti_torch_call_dispatcher("aten::copy_", "", stack.data()));
return to<torch::stable::Tensor>(stack[0]);
return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
}
// We expect this to be the stable version of the clone op. We will
// add optional memory_format kwarg support in the future.
inline torch::stable::Tensor clone(const torch::stable::Tensor& self) {
const auto num_args = 2;
std::array<StableIValue, num_args> stack{from(self), from(std::nullopt)};
std::array<StableIValue, num_args> stack{
torch::stable::detail::from(self),
torch::stable::detail::from(std::nullopt)};
TORCH_ERROR_CODE_CHECK(
aoti_torch_call_dispatcher("aten::clone", "", stack.data()));
return to<torch::stable::Tensor>(stack[0]);
return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
}
} // namespace torch::stable


@@ -9,6 +9,8 @@
#include <optional>
namespace torch::stable::detail {
// forward declare so that the from/to() implementations in the detail
// namespace of library.h where the real work is done can compile.
template <typename T>
@@ -17,15 +19,8 @@ template <typename T>
T to(StableIValue val);
// =============================================================================
// helpers for converting between StableIValue and T
// Below are the helpers for converting between StableIValue and T
// =============================================================================
// note that the signatures for from and to are forward declared in
// stable/stableivalue_conversions.h but defined below to avoid circular
// dependencies where other headers (like tensor-inl.h) will need to/from.
namespace detail {
// =============================================================================
// FROM CONVERSIONS (T -> StableIValue)
// =============================================================================
@@ -314,7 +309,9 @@ struct ToImpl<torch::stable::Tensor> {
}
};
} // namespace detail
// =============================================================================
// end to helpers for converting between StableIValue and T
// =============================================================================
// Expose the partially templated class functions through single functions
template <typename T>
@@ -338,6 +335,42 @@ inline T to(StableIValue val) {
return detail::ToImpl<T>::call(val);
}
// =============================================================================
// end to helpers for converting between StableIValue and T
// =============================================================================
} // namespace torch::stable::detail
// [global from/to deprecation note]
// WARNING! the following APIs will be removed!! We deprecated global from/to
// (in 2.10) in favor of torch::stable::detail from/to to not pollute the global
// namespace. We are only including the following wrappers for backwards
// compatibility.
// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
// note]
template <typename T>
[[deprecated("Use torch::stable::detail::from instead.")]]
inline StableIValue from(T val) {
return torch::stable::detail::from(val);
}
// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
// note]
template <typename T>
[[deprecated("Use torch::stable::detail::from instead.")]]
inline StableIValue from(const std::optional<T>& val) {
return torch::stable::detail::from(val);
}
// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
// note]
[[deprecated(
"Use torch::stable::detail::from instead.")]] [[maybe_unused]] inline StableIValue
from(const torch::stable::Tensor& val) {
return torch::stable::detail::from(val);
}
// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
// note]
template <typename T>
[[deprecated("Use torch::stable::detail::to instead.")]]
inline T to(StableIValue val) {
return torch::stable::detail::to<T>(val);
}


@@ -17,7 +17,8 @@ using torch::headeronly::ScalarType;
inline ScalarType Tensor::scalar_type() const {
int32_t dtype;
TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(ath_.get(), &dtype));
return to<ScalarType>(from(dtype));
return torch::stable::detail::to<ScalarType>(
torch::stable::detail::from(dtype));
}
} // namespace torch::stable