[codemod] c10::optional -> std::optional (#126135)

Generated by running the following from the PyTorch root directory:
```
find . -regex ".*\.\(cpp\|h\|cu\|hpp\|cc\|cxx\)$" | grep -v "build/" | xargs -n 50 -P 4 perl -pi -e 's/c10::optional/std::optional/'
```
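
To sanity-check the result, something like the following grep can confirm that no occurrences of the alias remain outside `build/`; the file filters here are illustrative and simply mirror the ones in the command above, they are not part of the original commit:
```
grep -rn --include='*.cpp' --include='*.h' --include='*.cu' --include='*.hpp' --include='*.cc' --include='*.cxx' 'c10::optional' . | grep -v 'build/'
```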

`c10::optional` is just an alias for `std::optional`. This commit removes uses of that alias in preparation for eliminating it entirely.
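
For context, the alias amounts to roughly the following; this is a minimal sketch of the idea, not the exact contents of PyTorch's `c10/util/Optional.h`:
```
#include <optional>
#include <type_traits>

// Sketch: c10::optional is literally std::optional, so the textual
// substitution performed by this commit is behavior-preserving.
namespace c10 {
using std::optional;
using std::nullopt;
using std::make_optional;
} // namespace c10

static_assert(std::is_same_v<c10::optional<int>, std::optional<int>>,
              "c10::optional<T> and std::optional<T> name the same type");
```
Because the two spellings name the same type, the diff below is a pure find-and-replace with no semantic change.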

Pull Request resolved: https://github.com/pytorch/pytorch/pull/126135
Approved by: https://github.com/Skylion007, https://github.com/malfet, https://github.com/albanD, https://github.com/aaronenyeshi
Richard Barnes 2024-05-14 19:35:49 +00:00 committed by PyTorch MergeBot
parent b55f57b7af
commit ed327876f5
907 changed files with 5655 additions and 5655 deletions


@ -81,8 +81,8 @@ inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
CPUGeneratorImpl::CPUGeneratorImpl(uint64_t seed_in) CPUGeneratorImpl::CPUGeneratorImpl(uint64_t seed_in)
: c10::GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(c10::DispatchKey::CPU)}, : c10::GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(c10::DispatchKey::CPU)},
engine_{seed_in}, engine_{seed_in},
next_float_normal_sample_{c10::optional<float>()}, next_float_normal_sample_{std::optional<float>()},
next_double_normal_sample_{c10::optional<double>()} { } next_double_normal_sample_{std::optional<double>()} { }
/** /**
* Manually seeds the engine with the seed input * Manually seeds the engine with the seed input
@ -151,8 +151,8 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
detail::check_rng_state(new_state); detail::check_rng_state(new_state);
at::mt19937 engine; at::mt19937 engine;
auto float_normal_sample = c10::optional<float>(); auto float_normal_sample = std::optional<float>();
auto double_normal_sample = c10::optional<double>(); auto double_normal_sample = std::optional<double>();
// Construct the state of at::CPUGeneratorImpl based on input byte tensor size. // Construct the state of at::CPUGeneratorImpl based on input byte tensor size.
CPUGeneratorImplStateLegacy* legacy_pod{nullptr}; CPUGeneratorImplStateLegacy* legacy_pod{nullptr};
@ -160,7 +160,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
if (new_state_size == size_legacy) { if (new_state_size == size_legacy) {
legacy_pod = (CPUGeneratorImplStateLegacy*)new_state.data(); legacy_pod = (CPUGeneratorImplStateLegacy*)new_state.data();
// Note that in CPUGeneratorImplStateLegacy, we didn't have float version // Note that in CPUGeneratorImplStateLegacy, we didn't have float version
// of normal sample and hence we leave the c10::optional<float> as is // of normal sample and hence we leave the std::optional<float> as is
// Update next_double_normal_sample. // Update next_double_normal_sample.
// Note that CPUGeneratorImplStateLegacy stores two uniform values (normal_x, normal_y) // Note that CPUGeneratorImplStateLegacy stores two uniform values (normal_x, normal_y)
@ -171,14 +171,14 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
auto r = legacy_pod->normal_rho; auto r = legacy_pod->normal_rho;
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x; auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode // we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta)); double_normal_sample = std::optional<double>(r * ::sin(theta));
} }
} else if (new_state_size == size_current) { } else if (new_state_size == size_current) {
auto rng_state = (CPUGeneratorImplState*)new_state.data(); auto rng_state = (CPUGeneratorImplState*)new_state.data();
legacy_pod = &rng_state->legacy_pod; legacy_pod = &rng_state->legacy_pod;
// update next_float_normal_sample // update next_float_normal_sample
if (rng_state->is_next_float_normal_sample_valid) { if (rng_state->is_next_float_normal_sample_valid) {
float_normal_sample = c10::optional<float>(rng_state->next_float_normal_sample); float_normal_sample = std::optional<float>(rng_state->next_float_normal_sample);
} }
// Update next_double_normal_sample. // Update next_double_normal_sample.
@ -186,7 +186,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// and if it's valid in normal_is_valid. The redundant normal_x and normal_rho // and if it's valid in normal_is_valid. The redundant normal_x and normal_rho
// are squashed to 0.0. // are squashed to 0.0.
if (legacy_pod->normal_is_valid) { if (legacy_pod->normal_is_valid) {
double_normal_sample = c10::optional<double>(legacy_pod->normal_y); double_normal_sample = std::optional<double>(legacy_pod->normal_y);
} }
} else { } else {
AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy, AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
@ -283,14 +283,14 @@ uint64_t CPUGeneratorImpl::random64() {
/** /**
* Get the cached normal random in float * Get the cached normal random in float
*/ */
c10::optional<float> CPUGeneratorImpl::next_float_normal_sample() { std::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
return next_float_normal_sample_; return next_float_normal_sample_;
} }
/** /**
* Get the cached normal random in double * Get the cached normal random in double
*/ */
c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() { std::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
return next_double_normal_sample_; return next_double_normal_sample_;
} }
@ -299,7 +299,7 @@ c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
* *
* See Note [Acquire lock when using random generators] * See Note [Acquire lock when using random generators]
*/ */
void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn) { void CPUGeneratorImpl::set_next_float_normal_sample(std::optional<float> randn) {
next_float_normal_sample_ = randn; next_float_normal_sample_ = randn;
} }
@ -308,7 +308,7 @@ void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn)
* *
* See Note [Acquire lock when using random generators] * See Note [Acquire lock when using random generators]
*/ */
void CPUGeneratorImpl::set_next_double_normal_sample(c10::optional<double> randn) { void CPUGeneratorImpl::set_next_double_normal_sample(std::optional<double> randn) {
next_double_normal_sample_ = randn; next_double_normal_sample_ = randn;
} }


@ -24,18 +24,18 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
static c10::DeviceType device_type(); static c10::DeviceType device_type();
uint32_t random(); uint32_t random();
uint64_t random64(); uint64_t random64();
c10::optional<float> next_float_normal_sample(); std::optional<float> next_float_normal_sample();
c10::optional<double> next_double_normal_sample(); std::optional<double> next_double_normal_sample();
void set_next_float_normal_sample(c10::optional<float> randn); void set_next_float_normal_sample(std::optional<float> randn);
void set_next_double_normal_sample(c10::optional<double> randn); void set_next_double_normal_sample(std::optional<double> randn);
at::mt19937 engine(); at::mt19937 engine();
void set_engine(at::mt19937 engine); void set_engine(at::mt19937 engine);
private: private:
CPUGeneratorImpl* clone_impl() const override; CPUGeneratorImpl* clone_impl() const override;
at::mt19937 engine_; at::mt19937 engine_;
c10::optional<float> next_float_normal_sample_; std::optional<float> next_float_normal_sample_;
c10::optional<double> next_double_normal_sample_; std::optional<double> next_double_normal_sample_;
}; };
namespace detail { namespace detail {


@ -59,7 +59,7 @@ class TORCH_API Context {
} }
} }
const AcceleratorHooksInterface& getAcceleratorHooksInterface( const AcceleratorHooksInterface& getAcceleratorHooksInterface(
c10::optional<c10::DeviceType> opt_device_type = c10::nullopt) { std::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
c10::DeviceType device_type = opt_device_type.has_value() c10::DeviceType device_type = opt_device_type.has_value()
? opt_device_type.value() ? opt_device_type.value()
: at::getAccelerator(true).value(); : at::getAccelerator(true).value();
@ -395,7 +395,7 @@ class TORCH_API Context {
bool release_original_weights = false; bool release_original_weights = false;
#endif #endif
bool display_vmap_fallback_warnings_ = false; bool display_vmap_fallback_warnings_ = false;
c10::optional<at::QEngine> quantized_engine = c10::nullopt; std::optional<at::QEngine> quantized_engine = c10::nullopt;
bool enable_sparse_tensor_invariant_checks = false; bool enable_sparse_tensor_invariant_checks = false;
bool allow_fp16_reduction_cpu = false; bool allow_fp16_reduction_cpu = false;


@ -15,7 +15,7 @@ namespace at {
// OptionalDeviceGuard guard(device_of(tensor)); // OptionalDeviceGuard guard(device_of(tensor));
/// Return the Device of a Tensor, if the Tensor is defined. /// Return the Device of a Tensor, if the Tensor is defined.
inline c10::optional<Device> device_of(const Tensor& t) { inline std::optional<Device> device_of(const Tensor& t) {
if (t.defined()) { if (t.defined()) {
return c10::make_optional(t.device()); return c10::make_optional(t.device());
} else { } else {
@ -23,14 +23,14 @@ inline c10::optional<Device> device_of(const Tensor& t) {
} }
} }
inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) { inline std::optional<Device> device_of(const c10::optional<Tensor>& t) {
return t.has_value() ? device_of(t.value()) : c10::nullopt; return t.has_value() ? device_of(t.value()) : c10::nullopt;
} }
/// Return the Device of a TensorList, if the list is non-empty and /// Return the Device of a TensorList, if the list is non-empty and
/// the first Tensor is defined. (This function implicitly assumes /// the first Tensor is defined. (This function implicitly assumes
/// that all tensors in the list have the same device.) /// that all tensors in the list have the same device.)
inline c10::optional<Device> device_of(ITensorListRef t) { inline std::optional<Device> device_of(ITensorListRef t) {
if (!t.empty()) { if (!t.empty()) {
return device_of(t.front()); return device_of(t.front());
} else { } else {


@ -163,7 +163,7 @@ TensorBase _empty_generic(
c10::Allocator* allocator, c10::Allocator* allocator,
c10::DispatchKeySet ks, c10::DispatchKeySet ks,
ScalarType scalar_type, ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
at::detail::check_size_nonnegative(size); at::detail::check_size_nonnegative(size);
at::detail::raise_warning_for_complex_half(scalar_type); at::detail::raise_warning_for_complex_half(scalar_type);
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type); caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
@ -197,7 +197,7 @@ TensorBase empty_generic(
c10::Allocator* allocator, c10::Allocator* allocator,
c10::DispatchKeySet ks, c10::DispatchKeySet ks,
ScalarType scalar_type, ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt); return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
} }
@ -206,7 +206,7 @@ TensorBase empty_generic_symint(
c10::Allocator* allocator, c10::Allocator* allocator,
c10::DispatchKeySet ks, c10::DispatchKeySet ks,
ScalarType scalar_type, ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt); return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
} }
@ -252,7 +252,7 @@ TensorBase empty_strided_symint_generic(
} }
TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory, TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
auto allocator = GetCPUAllocatorMaybePinned(pin_memory); auto allocator = GetCPUAllocatorMaybePinned(pin_memory);
constexpr c10::DispatchKeySet cpu_ks(c10::DispatchKey::CPU); constexpr c10::DispatchKeySet cpu_ks(c10::DispatchKey::CPU);
return empty_generic(size, allocator, cpu_ks, dtype, memory_format_opt); return empty_generic(size, allocator, cpu_ks, dtype, memory_format_opt);
@ -260,11 +260,11 @@ TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory,
TensorBase empty_cpu( TensorBase empty_cpu(
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@ -295,10 +295,10 @@ TensorBase empty_strided_cpu(IntArrayRef size, IntArrayRef stride,
TensorBase empty_strided_cpu( TensorBase empty_strided_cpu(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) { std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@ -342,7 +342,7 @@ static MetaAllocator g_meta_alloc;
REGISTER_ALLOCATOR(kMeta, &g_meta_alloc); REGISTER_ALLOCATOR(kMeta, &g_meta_alloc);
TensorBase empty_meta(IntArrayRef size, ScalarType dtype, TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
auto *allocator = GetAllocator(kMeta); auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta); constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta);
return at::detail::empty_generic( return at::detail::empty_generic(
@ -351,11 +351,11 @@ TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
TensorBase empty_meta( TensorBase empty_meta(
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt std::optional<c10::MemoryFormat> memory_format_opt
) { ) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
// NB: because there is no SparseMeta (yet), non-strided layout is // NB: because there is no SparseMeta (yet), non-strided layout is
@ -371,11 +371,11 @@ TensorBase empty_meta(
TensorBase empty_symint_meta( TensorBase empty_symint_meta(
SymIntArrayRef size, SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt std::optional<c10::MemoryFormat> memory_format_opt
) { ) {
auto *allocator = GetAllocator(kMeta); auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet ks(c10::DispatchKey::Meta); constexpr c10::DispatchKeySet ks(c10::DispatchKey::Meta);
@ -405,10 +405,10 @@ TensorBase empty_strided_meta(IntArrayRef size, IntArrayRef stride,
TensorBase empty_strided_meta( TensorBase empty_strided_meta(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) { std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@ -440,10 +440,10 @@ TensorBase empty_strided_symint_meta(SymIntArrayRef size, SymIntArrayRef stride,
TensorBase empty_strided_symint_meta( TensorBase empty_strided_symint_meta(
SymIntArrayRef size, SymIntArrayRef size,
SymIntArrayRef stride, SymIntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) { std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);


@ -49,14 +49,14 @@ TORCH_API TensorBase empty_generic(
c10::Allocator* allocator, c10::Allocator* allocator,
c10::DispatchKeySet ks, c10::DispatchKeySet ks,
ScalarType scalar_type, ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_generic_symint( TORCH_API TensorBase empty_generic_symint(
SymIntArrayRef size, SymIntArrayRef size,
c10::Allocator* allocator, c10::Allocator* allocator,
c10::DispatchKeySet ks, c10::DispatchKeySet ks,
ScalarType scalar_type, ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_strided_generic( TORCH_API TensorBase empty_strided_generic(
IntArrayRef size, IntArrayRef size,
@ -76,15 +76,15 @@ TORCH_API TensorBase empty_cpu(
IntArrayRef size, IntArrayRef size,
ScalarType dtype, ScalarType dtype,
bool pin_memory = false, bool pin_memory = false,
c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt); std::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
TORCH_API TensorBase empty_cpu( TORCH_API TensorBase empty_cpu(
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options); TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);
@ -97,10 +97,10 @@ TORCH_API TensorBase empty_strided_cpu(
TORCH_API TensorBase empty_strided_cpu( TORCH_API TensorBase empty_strided_cpu(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt); std::optional<bool> pin_memory_opt);
TORCH_API TensorBase empty_strided_cpu( TORCH_API TensorBase empty_strided_cpu(
IntArrayRef size, IntArrayRef size,
@ -110,23 +110,23 @@ TORCH_API TensorBase empty_strided_cpu(
TORCH_API TensorBase empty_meta( TORCH_API TensorBase empty_meta(
IntArrayRef size, IntArrayRef size,
ScalarType dtype, ScalarType dtype,
c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt); std::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
TORCH_API TensorBase empty_meta( TORCH_API TensorBase empty_meta(
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_symint_meta( TORCH_API TensorBase empty_symint_meta(
SymIntArrayRef size, SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options); TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
@ -136,10 +136,10 @@ empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);
TORCH_API TensorBase empty_strided_meta( TORCH_API TensorBase empty_strided_meta(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt); std::optional<bool> pin_memory_opt);
TORCH_API TensorBase empty_strided_meta( TORCH_API TensorBase empty_strided_meta(
IntArrayRef size, IntArrayRef size,
@ -154,10 +154,10 @@ TORCH_API TensorBase empty_strided_symint_meta(
TORCH_API TensorBase empty_strided_symint_meta( TORCH_API TensorBase empty_strided_symint_meta(
SymIntArrayRef size, SymIntArrayRef size,
SymIntArrayRef stride, SymIntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt); std::optional<bool> pin_memory_opt);
TORCH_API TensorBase empty_strided_symint_meta( TORCH_API TensorBase empty_strided_symint_meta(
SymIntArrayRef size, SymIntArrayRef size,


@ -145,7 +145,7 @@ Tensor FunctionalInverses::_neg_view_inverse(const Tensor& base, const Tensor& m
} }
} }
Tensor FunctionalInverses::as_strided_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) { Tensor FunctionalInverses::as_strided_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef stride, std::optional<c10::SymInt> storage_offset) {
if (inverse_return_mode == InverseReturnMode::AlwaysView) { if (inverse_return_mode == InverseReturnMode::AlwaysView) {
// NB: assumes mutated_view is a narrowed view of base. // NB: assumes mutated_view is a narrowed view of base.
// We should NOT do this for functionalization // We should NOT do this for functionalization
@ -220,7 +220,7 @@ Tensor FunctionalInverses::lift_fresh_inverse(const Tensor& base, const Tensor&
return mutated_view; return mutated_view;
} }
Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) { Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, std::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
if (inverse_return_mode == InverseReturnMode::AlwaysView) { if (inverse_return_mode == InverseReturnMode::AlwaysView) {
// NB: assumes mutated_view is a narrowed view of base. // NB: assumes mutated_view is a narrowed view of base.
// We should NOT do this for functionalization // We should NOT do this for functionalization


@ -526,7 +526,7 @@ Tensor to_functional_tensor(const Tensor& tensor) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isFunctionalTensor(tensor)); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isFunctionalTensor(tensor));
return at::detail::make_tensor<FunctionalTensorWrapper>(tensor); return at::detail::make_tensor<FunctionalTensorWrapper>(tensor);
} }
c10::optional<Tensor> to_functional_tensor(const c10::optional<Tensor>& tensor) { std::optional<Tensor> to_functional_tensor(const c10::optional<Tensor>& tensor) {
if (tensor.has_value()) { if (tensor.has_value()) {
return c10::make_optional<Tensor>(to_functional_tensor(*tensor)); return c10::make_optional<Tensor>(to_functional_tensor(*tensor));
} }
@ -564,7 +564,7 @@ Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional) {
return tensor; return tensor;
} }
} }
c10::optional<Tensor> from_functional_tensor(const c10::optional<Tensor>& t, bool assert_functional) { std::optional<Tensor> from_functional_tensor(const c10::optional<Tensor>& t, bool assert_functional) {
if (t.has_value()) { if (t.has_value()) {
return c10::make_optional<Tensor>(from_functional_tensor(*t, assert_functional)); return c10::make_optional<Tensor>(from_functional_tensor(*t, assert_functional));
} }
@ -610,7 +610,7 @@ void sync(const Tensor& t) {
auto functional_impl = at::functionalization::impl::unsafeGetFunctionalWrapper(t); auto functional_impl = at::functionalization::impl::unsafeGetFunctionalWrapper(t);
functional_impl->sync_(); functional_impl->sync_();
} }
void sync(const c10::optional<Tensor>& t) { void sync(const std::optional<Tensor>& t) {
if (t.has_value()) { if (t.has_value()) {
sync(*t); sync(*t);
} }
@ -692,7 +692,7 @@ bool isFunctionalTensor(const at::Tensor& tensor) {
return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Functionalize); return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Functionalize);
} }
bool isFunctionalTensor(const c10::optional<Tensor>& t) { bool isFunctionalTensor(const std::optional<Tensor>& t) {
if (t.has_value()) { if (t.has_value()) {
return isFunctionalTensor(*t); return isFunctionalTensor(*t);
} else { } else {


@ -286,32 +286,32 @@ TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
} }
TORCH_API bool isFunctionalTensor(const at::Tensor& tensor); TORCH_API bool isFunctionalTensor(const at::Tensor& tensor);
TORCH_API bool isFunctionalTensor(const c10::optional<Tensor>& t); TORCH_API bool isFunctionalTensor(const std::optional<Tensor>& t);
TORCH_API bool isFunctionalTensor( TORCH_API bool isFunctionalTensor(
const c10::List<c10::optional<Tensor>>& t_list); const c10::List<std::optional<Tensor>>& t_list);
TORCH_API bool isFunctionalTensor(ITensorListRef list); TORCH_API bool isFunctionalTensor(ITensorListRef list);
TORCH_API Tensor to_functional_tensor(const Tensor& tensor); TORCH_API Tensor to_functional_tensor(const Tensor& tensor);
TORCH_API c10::optional<Tensor> to_functional_tensor( TORCH_API std::optional<Tensor> to_functional_tensor(
const c10::optional<Tensor>& tensor); const std::optional<Tensor>& tensor);
TORCH_API c10::List<c10::optional<Tensor>> to_functional_tensor( TORCH_API c10::List<std::optional<Tensor>> to_functional_tensor(
const c10::List<c10::optional<Tensor>>& t_list); const c10::List<std::optional<Tensor>>& t_list);
TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list); TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list);
TORCH_API void freeze_functional_tensor(const Tensor& tensor); TORCH_API void freeze_functional_tensor(const Tensor& tensor);
TORCH_API Tensor TORCH_API Tensor
from_functional_tensor(const Tensor& tensor, bool assert_functional = true); from_functional_tensor(const Tensor& tensor, bool assert_functional = true);
TORCH_API c10::optional<Tensor> from_functional_tensor( TORCH_API std::optional<Tensor> from_functional_tensor(
const c10::optional<Tensor>& t, const std::optional<Tensor>& t,
bool assert_functional = true); bool assert_functional = true);
TORCH_API c10::List<c10::optional<Tensor>> from_functional_tensor( TORCH_API c10::List<std::optional<Tensor>> from_functional_tensor(
const c10::List<c10::optional<Tensor>>& t_list); const c10::List<std::optional<Tensor>>& t_list);
TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list); TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list);
TORCH_API void sync(const at::Tensor& t); TORCH_API void sync(const at::Tensor& t);
TORCH_API void sync(const c10::optional<Tensor>& t); TORCH_API void sync(const std::optional<Tensor>& t);
TORCH_API void sync(const c10::List<c10::optional<Tensor>>& t_list); TORCH_API void sync(const c10::List<std::optional<Tensor>>& t_list);
TORCH_API void sync(ITensorListRef t_list); TORCH_API void sync(ITensorListRef t_list);
TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other); TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other);


@ -125,7 +125,7 @@ namespace {
// - when we resize to a larger size, it acts as a mutation // - when we resize to a larger size, it acts as a mutation
// - when we resize to a smaller size, it acts as a view // - when we resize to a smaller size, it acts as a view
// See Note [resize_ in Functionalization] for more dtails // See Note [resize_ in Functionalization] for more dtails
static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) { static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, std::optional<at::MemoryFormat> memory_format) {
// First unwrap the tensor arguments // First unwrap the tensor arguments
at::Tensor self_; at::Tensor self_;
if (at::functionalization::impl::isFunctionalTensor(self)) { if (at::functionalization::impl::isFunctionalTensor(self)) {
@ -216,7 +216,7 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) {
// in the local include TLS. As a result, when we redispatch here, // in the local include TLS. As a result, when we redispatch here,
// we will end up hitting PreDispatch stack first. So, we should // we will end up hitting PreDispatch stack first. So, we should
// directly redispatch to the functionalize key manually. // directly redispatch to the functionalize key manually.
static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed<at::Tensor(const at::Tensor &, c10::optional<at::MemoryFormat>)>(); static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed<at::Tensor(const at::Tensor &, std::optional<at::MemoryFormat>)>();
return op.redispatch(c10::DispatchKeySet({c10::DispatchKey::Functionalize}), self, c10::nullopt); return op.redispatch(c10::DispatchKeySet({c10::DispatchKey::Functionalize}), self, c10::nullopt);
} }
@ -225,7 +225,7 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) {
return at::functionalization::impl::to_functional_tensor(out); return at::functionalization::impl::to_functional_tensor(out);
} }
static bool device_opted_into_functionalization(c10::Device self_device, c10::optional<c10::Device> tgt_device) { static bool device_opted_into_functionalization(c10::Device self_device, std::optional<c10::Device> tgt_device) {
// If the target device is empty, then the output tensor should be on the same device as the input // If the target device is empty, then the output tensor should be on the same device as the input
auto real_tgt_device = tgt_device.has_value() ? tgt_device.value() : self_device; auto real_tgt_device = tgt_device.has_value() ? tgt_device.value() : self_device;
return real_tgt_device.type() == c10::DeviceType::XLA || real_tgt_device.type() == c10::DeviceType::Lazy; return real_tgt_device.type() == c10::DeviceType::XLA || real_tgt_device.type() == c10::DeviceType::Lazy;
@ -235,12 +235,12 @@ static bool device_opted_into_functionalization(c10::Device self_device, c10::op
// We should probably get rid of this though. // We should probably get rid of this though.
static at::Tensor _to_copy_functionalize( static at::Tensor _to_copy_functionalize(
const at::Tensor & self, const at::Tensor & self,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory, std::optional<bool> pin_memory,
bool non_blocking, bool non_blocking,
c10::optional<at::MemoryFormat> memory_format) { std::optional<at::MemoryFormat> memory_format) {
at::Tensor self_; at::Tensor self_;
if (at::functionalization::impl::isFunctionalTensor(self)) { if (at::functionalization::impl::isFunctionalTensor(self)) {
// sync any pending updates // sync any pending updates


@ -23,7 +23,7 @@ inline void infer_size_impl(
ResultVec& res) { ResultVec& res) {
NumelType newsize = 1; NumelType newsize = 1;
// N.B. this is an index, not a sym dim! // N.B. this is an index, not a sym dim!
auto infer_dim = c10::optional<int64_t>(); auto infer_dim = std::optional<int64_t>();
for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) { for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
if (shape[dim] == -1) { if (shape[dim] == -1) {
if (infer_dim) { if (infer_dim) {


@ -380,8 +380,8 @@ Tensor select_backward_batching_rule(const Tensor& grad, IntArrayRef input_sizes
Tensor slice_batching_rule( Tensor slice_batching_rule(
const Tensor& self, const Tensor& self,
int64_t dim, int64_t dim,
c10::optional<int64_t> start, std::optional<int64_t> start,
c10::optional<int64_t> end, std::optional<int64_t> end,
int64_t step) { int64_t step) {
auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self); auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
auto dim_physical = self_physical.getPhysicalDim(dim); auto dim_physical = self_physical.getPhysicalDim(dim);
@ -996,10 +996,10 @@ Tensor new_zeros_batching_rule(
Tensor new_empty_batching_rule( Tensor new_empty_batching_rule(
const Tensor& self, const Tensor& self,
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype, std::optional<ScalarType> dtype,
c10::optional<Layout> layout, std::optional<Layout> layout,
c10::optional<Device> device, std::optional<Device> device,
c10::optional<bool> pin_memory) { std::optional<bool> pin_memory) {
auto physical_view = MultiBatchVmapTransform::logicalToPhysical(self); auto physical_view = MultiBatchVmapTransform::logicalToPhysical(self);
auto physical_size = physical_view.getPhysicalShape(size); auto physical_size = physical_view.getPhysicalShape(size);
auto result = physical_view.tensor().new_empty(physical_size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)); auto result = physical_view.tensor().new_empty(physical_size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory));
@ -1209,10 +1209,10 @@ TORCH_LIBRARY_IMPL(aten, Batched, m) {
BINARY_POINTWISE(mul); BINARY_POINTWISE(mul);
BINARY_POINTWISE(div); BINARY_POINTWISE(div);
{ {
using Binop = Tensor (*)(const Tensor&, const Tensor&, c10::optional<c10::string_view>); using Binop = Tensor (*)(const Tensor&, const Tensor&, std::optional<c10::string_view>);
using Unop = Tensor (*)(const Tensor&, const Scalar&, c10::optional<c10::string_view>); using Unop = Tensor (*)(const Tensor&, const Scalar&, std::optional<c10::string_view>);
m.impl("div.Tensor_mode", binary_pointwise_batching_rule<Binop, at::div, c10::optional<c10::string_view>>); m.impl("div.Tensor_mode", binary_pointwise_batching_rule<Binop, at::div, std::optional<c10::string_view>>);
m.impl("div.Scalar_mode", unwrap_and_call<Unop, at::div, const Scalar&, c10::optional<c10::string_view>>); m.impl("div.Scalar_mode", unwrap_and_call<Unop, at::div, const Scalar&, std::optional<c10::string_view>>);
} }
// at::pow has three out-of-place overloads // at::pow has three out-of-place overloads


@ -128,7 +128,7 @@ static void assert_names_equal(DimnameList a, DimnameList b) {
} }
const Tensor& propagate_names_if_present_and_nonempty(const Tensor& result, const Tensor& propagate_names_if_present_and_nonempty(const Tensor& result,
c10::optional<DimnameList> maybe_names, std::optional<DimnameList> maybe_names,
bool validate_names) { bool validate_names) {
auto maybe_name_list = maybe_names.value_or(at::ArrayRef<Dimname>{}); auto maybe_name_list = maybe_names.value_or(at::ArrayRef<Dimname>{});
propagate_names_if_nonempty(result.unsafeGetTensorImpl(), maybe_name_list, validate_names); propagate_names_if_nonempty(result.unsafeGetTensorImpl(), maybe_name_list, validate_names);


@ -81,7 +81,7 @@ namespace namedinference {
const Tensor& propagate_names_if_present_and_nonempty( const Tensor& propagate_names_if_present_and_nonempty(
const Tensor& result, const Tensor& result,
c10::optional<DimnameList> maybe_names, std::optional<DimnameList> maybe_names,
bool validate_names = false); bool validate_names = false);
// Propagates `names` to `result` if `names` is not empty. // Propagates `names` to `result` if `names` is not empty.
// `names` can be empty; see [NOTE] Writing name inference rules // `names` can be empty; see [NOTE] Writing name inference rules


@ -236,7 +236,7 @@ NestedTensorImpl::NestedTensorImpl(
set_custom_sizes_strides(c10::TensorImpl::SizesStridesPolicy::CustomSizes); set_custom_sizes_strides(c10::TensorImpl::SizesStridesPolicy::CustomSizes);
} }
c10::optional<int64_t> NestedTensorImpl::opt_size(int64_t d) const { std::optional<int64_t> NestedTensorImpl::opt_size(int64_t d) const {
if (C10_UNLIKELY(!opt_sizes_.has_value())) { if (C10_UNLIKELY(!opt_sizes_.has_value())) {
// Cache the metadata to avoid recomputing it each time. // Cache the metadata to avoid recomputing it each time.
opt_sizes_ = c10::make_optional(construct_opt_sizes(nested_sizes_)); opt_sizes_ = c10::make_optional(construct_opt_sizes(nested_sizes_));


@ -61,10 +61,10 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
// Returns nullopt if the ith dimension is irregular. The ith dimension // Returns nullopt if the ith dimension is irregular. The ith dimension
// of a NestedTensor is regular if the unbound tensors match in // of a NestedTensor is regular if the unbound tensors match in
// size at the (i-1)th dimension. // size at the (i-1)th dimension.
c10::optional<int64_t> opt_size(int64_t d) const; std::optional<int64_t> opt_size(int64_t d) const;
int64_t size(int64_t d) const { int64_t size(int64_t d) const {
c10::optional<int64_t> optional_size = this->opt_size(d); std::optional<int64_t> optional_size = this->opt_size(d);
TORCH_CHECK( TORCH_CHECK(
optional_size.has_value(), optional_size.has_value(),
"Given dimension ", "Given dimension ",
@ -171,7 +171,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
// Optional to allow it to be computed lazily from nested. // Optional to allow it to be computed lazily from nested.
// TODO: maybe we can remove this metadata since // TODO: maybe we can remove this metadata since
// we can compute it from `nested_sizes_` // we can compute it from `nested_sizes_`
mutable c10::optional<std::vector<int64_t>> opt_sizes_; mutable std::optional<std::vector<int64_t>> opt_sizes_;
template <typename VariableVersion> template <typename VariableVersion>
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core( c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(


@ -35,7 +35,7 @@ void SavedTensorDefaultHooks::enable() {
tls.disabled_error_message = c10::nullopt; tls.disabled_error_message = c10::nullopt;
} }
const c10::optional<std::string>& SavedTensorDefaultHooks::get_disabled_error_message() { const std::optional<std::string>& SavedTensorDefaultHooks::get_disabled_error_message() {
return tls.disabled_error_message; return tls.disabled_error_message;
} }


@ -21,7 +21,7 @@ struct TORCH_API SavedTensorDefaultHooksTLS {
// disabled_error_message is nullopt IFF Saved Tensor hooks is enabled // disabled_error_message is nullopt IFF Saved Tensor hooks is enabled
// We did this for efficiency (so we didn't have to keep a separate bool // We did this for efficiency (so we didn't have to keep a separate bool
// around) // around)
c10::optional<std::string> disabled_error_message; std::optional<std::string> disabled_error_message;
}; };
} // namespace impl } // namespace impl
@ -46,7 +46,7 @@ struct TORCH_API SavedTensorDefaultHooks {
static void disable(const std::string& error_message); static void disable(const std::string& error_message);
static void enable(); static void enable();
static bool is_enabled(); static bool is_enabled();
static const c10::optional<std::string>& get_disabled_error_message(); static const std::optional<std::string>& get_disabled_error_message();
}; };
} // namespace at } // namespace at


@ -23,7 +23,7 @@ Tensor& scalar_fill(Tensor& self, const Scalar& value) {
return self; return self;
} }
Tensor scalar_tensor_static(const Scalar& s, c10::optional<ScalarType> dtype_opt, c10::optional<Device> device_opt) { Tensor scalar_tensor_static(const Scalar& s, std::optional<ScalarType> dtype_opt, c10::optional<Device> device_opt) {
at::tracer::impl::NoTracerDispatchMode tracer_guard; at::tracer::impl::NoTracerDispatchMode tracer_guard;
at::AutoDispatchBelowAutograd mode; at::AutoDispatchBelowAutograd mode;
Tensor result = at::detail::empty_cpu( Tensor result = at::detail::empty_cpu(


@ -18,8 +18,8 @@ namespace at::detail {
Tensor& scalar_fill(Tensor& self, const Scalar& value); Tensor& scalar_fill(Tensor& self, const Scalar& value);
TORCH_API Tensor scalar_tensor_static( TORCH_API Tensor scalar_tensor_static(
const Scalar& s, const Scalar& s,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Device> device_opt); std::optional<Device> device_opt);
} // namespace at::detail } // namespace at::detail
// This is in the c10 namespace because we use ADL to find the functions in it. // This is in the c10 namespace because we use ADL to find the functions in it.


@ -39,9 +39,9 @@ TORCH_API extern const EllipsisIndexType Ellipsis;
struct TORCH_API Slice final { struct TORCH_API Slice final {
public: public:
Slice( Slice(
c10::optional<c10::SymInt> start_index = c10::nullopt, std::optional<c10::SymInt> start_index = c10::nullopt,
c10::optional<c10::SymInt> stop_index = c10::nullopt, std::optional<c10::SymInt> stop_index = c10::nullopt,
c10::optional<c10::SymInt> step_index = c10::nullopt) { std::optional<c10::SymInt> step_index = c10::nullopt) {
if (!step_index.has_value()) { if (!step_index.has_value()) {
step_ = c10::SymInt(1); step_ = c10::SymInt(1);
} else { } else {
@ -205,7 +205,7 @@ static inline Tensor applySlice(
c10::SymInt step, c10::SymInt step,
bool disable_slice_optimization, bool disable_slice_optimization,
const at::Device& self_device, const at::Device& self_device,
const c10::optional<SymIntArrayRef>& self_sizes) { const std::optional<SymIntArrayRef>& self_sizes) {
// TODO: implement negative step // TODO: implement negative step
TORCH_CHECK_VALUE(step > 0, "step must be greater than zero"); TORCH_CHECK_VALUE(step > 0, "step must be greater than zero");
@ -233,7 +233,7 @@ static inline Tensor applySelect(
SymInt index, SymInt index,
int64_t real_dim, int64_t real_dim,
const at::Device& /*self_device*/, const at::Device& /*self_device*/,
const c10::optional<SymIntArrayRef>& self_sizes) { const std::optional<SymIntArrayRef>& self_sizes) {
// See NOTE [nested tensor size for indexing] // See NOTE [nested tensor size for indexing]
if (self_sizes.has_value()) { if (self_sizes.has_value()) {
auto maybe_index = index.maybe_as_int(); auto maybe_index = index.maybe_as_int();
@ -431,7 +431,7 @@ static inline Tensor handleDimInMultiDimIndexing(
std::vector<Tensor>& outIndices, std::vector<Tensor>& outIndices,
bool disable_slice_optimization, bool disable_slice_optimization,
const at::Device& original_tensor_device, const at::Device& original_tensor_device,
const c10::optional<SymIntArrayRef>& prev_dim_result_sizes) { const std::optional<SymIntArrayRef>& prev_dim_result_sizes) {
if (index.is_integer()) { if (index.is_integer()) {
return impl::applySelect( return impl::applySelect(
prev_dim_result, prev_dim_result,
@ -515,7 +515,7 @@ static inline Tensor applySlicing(
std::vector<Tensor>& outIndices, std::vector<Tensor>& outIndices,
bool disable_slice_optimization, bool disable_slice_optimization,
const at::Device& self_device, const at::Device& self_device,
const c10::optional<SymIntArrayRef>& self_sizes) { const std::optional<SymIntArrayRef>& self_sizes) {
int64_t dim = 0; int64_t dim = 0;
int64_t specified_dims = impl::count_specified_dimensions(indices); int64_t specified_dims = impl::count_specified_dimensions(indices);
@ -531,9 +531,9 @@ static inline Tensor applySlicing(
for (const auto i : c10::irange(indices.size())) { for (const auto i : c10::irange(indices.size())) {
auto& obj = indices[i]; auto& obj = indices[i];
// See NOTE [nested tensor size for indexing] // See NOTE [nested tensor size for indexing]
c10::optional<SymIntArrayRef> result_sizes = result.is_nested() std::optional<SymIntArrayRef> result_sizes = result.is_nested()
? c10::optional<SymIntArrayRef>(c10::nullopt) ? std::optional<SymIntArrayRef>(c10::nullopt)
: c10::optional<SymIntArrayRef>(result.sym_sizes()); : std::optional<SymIntArrayRef>(result.sym_sizes());
result = handleDimInMultiDimIndexing( result = handleDimInMultiDimIndexing(
/*prev_dim_result=*/result, /*prev_dim_result=*/result,
/*original_tensor=*/self, /*original_tensor=*/self,
@ -607,9 +607,9 @@ static inline Tensor get_item(
// nested tensor does not have a size (yet) so for now we represent its size // nested tensor does not have a size (yet) so for now we represent its size
// as null may need to be changed after we reach a better solution for nested // as null may need to be changed after we reach a better solution for nested
// tensor size // tensor size
c10::optional<SymIntArrayRef> self_sizes = self.is_nested() std::optional<SymIntArrayRef> self_sizes = self.is_nested()
? c10::optional<SymIntArrayRef>(c10::nullopt) ? std::optional<SymIntArrayRef>(c10::nullopt)
: c10::optional<SymIntArrayRef>(self.sym_sizes()); : std::optional<SymIntArrayRef>(self.sym_sizes());
// handle simple types: integers, slices, none, ellipsis, bool // handle simple types: integers, slices, none, ellipsis, bool
if (indices.size() == 1) { if (indices.size() == 1) {


@ -147,7 +147,7 @@ struct TORCH_API OperandInfo {
/// promotion target_dtype value can become different from tensor's dtype /// promotion target_dtype value can become different from tensor's dtype
/// also, during type promotion target_dtype and device can be set for an /// also, during type promotion target_dtype and device can be set for an
/// undefined tensor so that tensor can be properly constructed later. /// undefined tensor so that tensor can be properly constructed later.
c10::optional<Device> device = c10::nullopt; std::optional<Device> device = c10::nullopt;
ScalarType target_dtype = ScalarType::Undefined; ScalarType target_dtype = ScalarType::Undefined;
// Caches dtype of the tensor, because scalar_type is an expensive operation // Caches dtype of the tensor, because scalar_type is an expensive operation
// If dtype of the tensor is changed (e.g. as a result of type promotion or in // If dtype of the tensor is changed (e.g. as a result of type promotion or in
@ -971,9 +971,9 @@ class TORCH_API TensorIteratorConfig final {
int num_outputs_ = 0; int num_outputs_ = 0;
int num_inputs_ = 0; int num_inputs_ = 0;
c10::optional<DimVector> static_shape_ = c10::nullopt; std::optional<DimVector> static_shape_ = c10::nullopt;
c10::optional<ScalarType> static_dtype_ = c10::nullopt; std::optional<ScalarType> static_dtype_ = c10::nullopt;
c10::optional<Device> static_device_ = c10::nullopt; std::optional<Device> static_device_ = c10::nullopt;
bool check_mem_overlap_ = true; bool check_mem_overlap_ = true;
bool allow_cpu_scalars_ = false; bool allow_cpu_scalars_ = false;
bool is_reduction_ = false; bool is_reduction_ = false;


@ -61,7 +61,7 @@ inline bool areAnyTensorSubclassLike(TensorList tensors) {
} }
inline bool areAnyOptionalTensorSubclassLike( inline bool areAnyOptionalTensorSubclassLike(
const c10::List<c10::optional<Tensor>>& tensors) { const c10::List<std::optional<Tensor>>& tensors) {
if (c10::impl::dispatch_mode_enabled()) if (c10::impl::dispatch_mode_enabled())
return true; return true;
return std::any_of( return std::any_of(


@ -327,7 +327,7 @@ std::vector<int64_t> defaultStrides(IntArrayRef sizes) {
// see overloads of computeStride() below. // see overloads of computeStride() below.
// //
template <typename ResultVec, typename NewShapeVec, typename Numel> template <typename ResultVec, typename NewShapeVec, typename Numel>
inline c10::optional<ResultVec> computeStride_impl( inline std::optional<ResultVec> computeStride_impl(
const NewShapeVec& oldshape, const NewShapeVec& oldshape,
const NewShapeVec& oldstride, const NewShapeVec& oldstride,
const NewShapeVec& newshape, const NewShapeVec& newshape,
@ -395,7 +395,7 @@ inline c10::optional<ResultVec> computeStride_impl(
return newstride; return newstride;
} }
c10::optional<std::vector<int64_t>> computeStride( std::optional<std::vector<int64_t>> computeStride(
IntArrayRef oldshape, IntArrayRef oldshape,
IntArrayRef oldstride, IntArrayRef oldstride,
IntArrayRef newshape) { IntArrayRef newshape) {
@ -403,7 +403,7 @@ c10::optional<std::vector<int64_t>> computeStride(
return computeStride_impl<std::vector<int64_t>, IntArrayRef, int64_t>(oldshape, oldstride, newshape, toResult); return computeStride_impl<std::vector<int64_t>, IntArrayRef, int64_t>(oldshape, oldstride, newshape, toResult);
} }
c10::optional<SymDimVector> computeStride( std::optional<SymDimVector> computeStride(
c10::SymIntArrayRef oldshape, c10::SymIntArrayRef oldshape,
c10::SymIntArrayRef oldstride, c10::SymIntArrayRef oldstride,
c10::SymIntArrayRef newshape) { c10::SymIntArrayRef newshape) {
@ -411,7 +411,7 @@ c10::optional<SymDimVector> computeStride(
return computeStride_impl<SymDimVector, c10::SymIntArrayRef, c10::SymInt>(oldshape, oldstride, newshape, toResult); return computeStride_impl<SymDimVector, c10::SymIntArrayRef, c10::SymInt>(oldshape, oldstride, newshape, toResult);
} }
c10::optional<DimVector> computeStride( std::optional<DimVector> computeStride(
IntArrayRef oldshape, IntArrayRef oldshape,
IntArrayRef oldstride, IntArrayRef oldstride,
const DimVector& newshape) { const DimVector& newshape) {


@ -171,17 +171,17 @@ TORCH_API void check_dim_size(
namespace detail { namespace detail {
TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes); TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
TORCH_API c10::optional<std::vector<int64_t>> computeStride( TORCH_API std::optional<std::vector<int64_t>> computeStride(
IntArrayRef oldshape, IntArrayRef oldshape,
IntArrayRef oldstride, IntArrayRef oldstride,
IntArrayRef newshape); IntArrayRef newshape);
TORCH_API c10::optional<SymDimVector> computeStride( TORCH_API std::optional<SymDimVector> computeStride(
c10::SymIntArrayRef oldshape, c10::SymIntArrayRef oldshape,
c10::SymIntArrayRef oldstride, c10::SymIntArrayRef oldstride,
c10::SymIntArrayRef newshape); c10::SymIntArrayRef newshape);
TORCH_API c10::optional<DimVector> computeStride( TORCH_API std::optional<DimVector> computeStride(
IntArrayRef oldshape, IntArrayRef oldshape,
IntArrayRef oldstride, IntArrayRef oldstride,
const DimVector& newshape); const DimVector& newshape);


@ -39,7 +39,7 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
// CppFunction::makeNamedNotSupported() to avoid listing out the types of everything. // CppFunction::makeNamedNotSupported() to avoid listing out the types of everything.
// However, registering e.g. CppFunction::makeNamedNotSupported() as an implementation // However, registering e.g. CppFunction::makeNamedNotSupported() as an implementation
// only works for operators that support boxing. // only works for operators that support boxing.
#define TENSOROPTIONS c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool> #define TENSOROPTIONS std::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>
// random operations (out-of-place) // random operations (out-of-place)
m.impl("bernoulli", unsupportedRandomOp<const Tensor&, optional<Generator>>); m.impl("bernoulli", unsupportedRandomOp<const Tensor&, optional<Generator>>);


@ -16,7 +16,7 @@ namespace at {
const auto num_arguments = arguments.size(); const auto num_arguments = arguments.size();
const auto stack_start = stack->size() - num_arguments; const auto stack_start = stack->size() - num_arguments;
c10::optional<bool> is_write; std::optional<bool> is_write;
for (const auto i : c10::irange(num_arguments)) { for (const auto i : c10::irange(num_arguments)) {
const auto& alias_info = arguments[i].alias_info(); const auto& alias_info = arguments[i].alias_info();
if (alias_info != nullptr) { if (alias_info != nullptr) {


@ -144,7 +144,7 @@ Tensor cached_cast(at::ScalarType to_type, const Tensor& arg, DeviceType device_
Banned functions Banned functions
*******************************/ *******************************/
static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const c10::optional<Tensor>&, int64_t) { static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional<Tensor>&, int64_t) {
AT_ERROR("torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n" AT_ERROR("torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
"Many models use a sigmoid layer right before the binary cross entropy layer.\n" "Many models use a sigmoid layer right before the binary cross entropy layer.\n"
"In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n" "In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n"


@ -297,9 +297,9 @@ TORCH_API Tensor cached_cast(
c10::DeviceType device_type = c10::DeviceType::CUDA); c10::DeviceType device_type = c10::DeviceType::CUDA);
// Overload to process optional<Tensor> // Overload to process optional<Tensor>
inline c10::optional<Tensor> cached_cast( inline std::optional<Tensor> cached_cast(
at::ScalarType to_type, at::ScalarType to_type,
const c10::optional<Tensor>& arg, const std::optional<Tensor>& arg,
c10::DeviceType device_type = c10::DeviceType::CUDA) { c10::DeviceType device_type = c10::DeviceType::CUDA) {
if (arg.has_value()) { if (arg.has_value()) {
return cached_cast(to_type, *arg, device_type); return cached_cast(to_type, *arg, device_type);
@ -353,9 +353,9 @@ Otherwise, set it to the autocast type.
********************************************************/ ********************************************************/
// Overload to catch dtype flags // Overload to catch dtype flags
c10::optional<ScalarType> inline set_opt_dtype( std::optional<ScalarType> inline set_opt_dtype(
at::ScalarType to_type, at::ScalarType to_type,
const c10::optional<ScalarType>& dtype) { const std::optional<ScalarType>& dtype) {
return dtype.has_value() ? dtype : to_type; return dtype.has_value() ? dtype : to_type;
} }
@ -392,7 +392,7 @@ enum class CastPolicy : uint8_t {
fp32, // Cast all inputs to at::kFloat before running the op. fp32, // Cast all inputs to at::kFloat before running the op.
fp32_set_opt_dtype, // Treats functions (like softmax) that fp32_set_opt_dtype, // Treats functions (like softmax) that
// 1. we'd like to run in fp32 and // 1. we'd like to run in fp32 and
// 2. have a c10::optional<ScalarType> arg that controls // 2. have a std::optional<ScalarType> arg that controls
// the output type. // the output type.
// fp32_set_opt_dtype wrappers' policy is: if the output // fp32_set_opt_dtype wrappers' policy is: if the output
// type is already set, don't touch it, otherwise, set // type is already set, don't touch it, otherwise, set
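The comment above describes the `fp32_set_opt_dtype` policy: an explicitly requested output dtype is respected, otherwise the op is opted into fp32. A hedged sketch of that decision, with an assumed `ScalarType` enum and a generic callable in place of the real autocast wrappers:

```
#include <optional>

enum class ScalarType { Half, Float };

template <class Op>
auto run_fp32_set_opt_dtype(Op op, std::optional<ScalarType> dtype) {
  if (!dtype.has_value()) {
    dtype = ScalarType::Float;  // no explicit dtype: opt the call into fp32
  }
  return op(dtype);  // an explicit user-supplied dtype is passed through untouched
}

int main() {
  // A fake "softmax" that just reports the dtype it was asked to run in.
  auto softmax = [](std::optional<ScalarType> d) {
    return d == ScalarType::Float ? 32 : 16;
  };
  return run_fp32_set_opt_dtype(softmax, std::nullopt) == 32 ? 0 : 1;
}
```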
@ -865,24 +865,24 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions.
_(ADD_NS(norm), \ _(ADD_NS(norm), \
"norm.Scalar", \ "norm.Scalar", \
Tensor(const Tensor&, const Scalar&), \ Tensor(const Tensor&, const Scalar&), \
Tensor(const Tensor&, const c10::optional<Scalar>&, ScalarType), \ Tensor(const Tensor&, const std::optional<Scalar>&, ScalarType), \
fp32_append_dtype) \ fp32_append_dtype) \
_(ADD_NS(norm), \ _(ADD_NS(norm), \
"norm.ScalarOpt_dim", \ "norm.ScalarOpt_dim", \
Tensor(const Tensor&, const c10::optional<Scalar>&, IntArrayRef, bool), \ Tensor(const Tensor&, const std::optional<Scalar>&, IntArrayRef, bool), \
Tensor( \ Tensor( \
const Tensor&, \ const Tensor&, \
const c10::optional<Scalar>&, \ const std::optional<Scalar>&, \
IntArrayRef, \ IntArrayRef, \
bool, \ bool, \
ScalarType), \ ScalarType), \
fp32_append_dtype) \ fp32_append_dtype) \
_(ADD_NS(norm), \ _(ADD_NS(norm), \
"norm.names_ScalarOpt_dim", \ "norm.names_ScalarOpt_dim", \
Tensor(const Tensor&, const c10::optional<Scalar>&, DimnameList, bool), \ Tensor(const Tensor&, const std::optional<Scalar>&, DimnameList, bool), \
Tensor( \ Tensor( \
const Tensor&, \ const Tensor&, \
const c10::optional<Scalar>&, \ const std::optional<Scalar>&, \
DimnameList, \ DimnameList, \
bool, \ bool, \
ScalarType), \ ScalarType), \

View File

@ -152,7 +152,7 @@ struct CachingHostAllocatorImpl {
// do not need to look up the ctx in blocks_. // do not need to look up the ctx in blocks_.
auto* block = reinterpret_cast<B*>(ctx); auto* block = reinterpret_cast<B*>(ctx);
c10::optional<std::vector<E>> events; std::optional<std::vector<E>> events;
{ {
std::lock_guard<std::mutex> g(block->mutex_); std::lock_guard<std::mutex> g(block->mutex_);
block->allocated_ = false; block->allocated_ = false;
@ -263,7 +263,7 @@ struct CachingHostAllocatorImpl {
// Avoid calling cudaEventDestroy while holding a mutex, so move // Avoid calling cudaEventDestroy while holding a mutex, so move
// intermediate events out of the lock into this object. // intermediate events out of the lock into this object.
// process the last event // process the last event
c10::optional<std::pair<E, B*>> processed; std::optional<std::pair<E, B*>> processed;
{ {
std::lock_guard<std::mutex> g(events_mutex_); std::lock_guard<std::mutex> g(events_mutex_);
if (!events_.empty()) { if (!events_.empty()) {
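These hunks follow the locking discipline spelled out in the comment: collect the pending work into a `std::optional` while holding the mutex, then run the CUDA-touching part after the lock is dropped. A simplified sketch under assumed types (ints stand in for events, no CUDA calls):

```
#include <mutex>
#include <optional>
#include <utility>
#include <vector>

struct Block {
  std::mutex mutex_;
  bool allocated_ = true;
  std::vector<int> event_pool_;  // ints stand in for recorded events
};

void free_block(Block& block) {
  std::optional<std::vector<int>> events;
  {
    std::lock_guard<std::mutex> g(block.mutex_);
    block.allocated_ = false;
    events = std::move(block.event_pool_);  // steal the events while locked
    block.event_pool_.clear();
  }
  // Destroy/process the events only after the lock is released, mirroring the
  // "avoid calling cudaEventDestroy while holding a mutex" comment above.
  if (events) {
    for (int e : *events) {
      (void)e;
    }
  }
}
```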
@ -324,7 +324,7 @@ struct CachingHostAllocatorImpl {
} }
// Record an event on stream and store event into events. // Record an event on stream and store event into events.
virtual void record_stream(c10::optional<std::vector<E>>& events, S stream) { virtual void record_stream(std::optional<std::vector<E>>& events, S stream) {
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for record_stream"); TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for record_stream");
} }

View File

@ -2,10 +2,10 @@
namespace c10::impl { namespace c10::impl {
inline c10::optional<MemoryFormat> inline std::optional<MemoryFormat>
check_tensor_options_and_extract_memory_format( check_tensor_options_and_extract_memory_format(
const TensorOptions& options, const TensorOptions& options,
c10::optional<MemoryFormat> memory_format) { std::optional<MemoryFormat> memory_format) {
TORCH_CHECK( TORCH_CHECK(
options.requires_grad_opt() == c10::nullopt || options.requires_grad_opt() == c10::nullopt ||
options.requires_grad_opt().value() == false, options.requires_grad_opt().value() == false,

View File

@ -14,7 +14,7 @@ Storage DeprecatedTypeProperties::unsafeStorageFromTH(void * th_pointer, bool re
return at::unsafeStorageFromTH(th_pointer, retain); return at::unsafeStorageFromTH(th_pointer, retain);
} }
Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, c10::optional<Device> to_device) const { Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, std::optional<Device> to_device) const {
if (to_device) { if (to_device) {
return src.to(src.options().dtype(scalarType()).device(to_device), non_blocking, /*copy=*/true); return src.to(src.options().dtype(scalarType()).device(to_device), non_blocking, /*copy=*/true);
} }

View File

@ -107,7 +107,7 @@ class TORCH_API DeprecatedTypeProperties {
/// Constructs the `TensorOptions` from a type and a Device. Asserts that /// Constructs the `TensorOptions` from a type and a Device. Asserts that
/// the device type matches the device type of the type. /// the device type matches the device type of the type.
TensorOptions options(c10::optional<Device> device_opt) const { TensorOptions options(std::optional<Device> device_opt) const {
if (!device_opt.has_value()) { if (!device_opt.has_value()) {
return options(-1); return options(-1);
} else { } else {
@ -129,7 +129,7 @@ class TORCH_API DeprecatedTypeProperties {
Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const; Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const;
Storage unsafeStorageFromTH(void * th_pointer, bool retain) const; Storage unsafeStorageFromTH(void * th_pointer, bool retain) const;
Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional<Device> to_device={}) const; Tensor copy(const Tensor & src, bool non_blocking=false, std::optional<Device> to_device={}) const;
private: private:
Backend backend_; Backend backend_;

View File

@ -21,7 +21,7 @@ struct TORCH_API Dimname {
bool isWildcard() const { return type_ == NameType::WILDCARD; } bool isWildcard() const { return type_ == NameType::WILDCARD; }
bool matches(Dimname other) const; bool matches(Dimname other) const;
c10::optional<Dimname> unify(Dimname other) const; std::optional<Dimname> unify(Dimname other) const;
private: private:
Dimname(Symbol name) Dimname(Symbol name)

View File

@ -144,7 +144,7 @@ template <typename RNG, typename ret_type,
C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \
if (generator->next_##TYPE##_normal_sample()) { \ if (generator->next_##TYPE##_normal_sample()) { \
*ret = *(generator->next_##TYPE##_normal_sample()); \ *ret = *(generator->next_##TYPE##_normal_sample()); \
generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>()); \ generator->set_next_##TYPE##_normal_sample(std::optional<TYPE>()); \
return true; \ return true; \
} \ } \
return false; \ return false; \
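The macro above consumes a cached normal sample at most once, resetting the optional afterwards. A plain-function sketch of the same contract, with an assumed `Gen` struct and only the `double` variant:

```
#include <optional>

struct Gen {
  std::optional<double> next_double_normal_sample;  // cached sample, filled elsewhere
};

bool maybe_get_next_double_normal_sample(Gen& g, double* ret) {
  if (g.next_double_normal_sample) {
    *ret = *g.next_double_normal_sample;
    g.next_double_normal_sample = std::optional<double>();  // clear the cache
    return true;                                            // consumed the cached sample
  }
  return false;  // no cached sample; the caller must draw a fresh one
}
```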

View File

@ -150,7 +150,7 @@ Generator make_generator(Args&&... args) {
* the backend generator type (CPU/CUDAGeneratorImpl etc.) * the backend generator type (CPU/CUDAGeneratorImpl etc.)
*/ */
template <typename T> template <typename T>
static inline T * check_generator(c10::optional<Generator> gen) { static inline T * check_generator(std::optional<Generator> gen) {
TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt"); TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed"); TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'"); TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
@ -164,7 +164,7 @@ static inline T * check_generator(c10::optional<Generator> gen) {
* the backend generator type (CPU/CUDAGeneratorImpl etc.) * the backend generator type (CPU/CUDAGeneratorImpl etc.)
*/ */
template <typename T> template <typename T>
static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) { static inline T* get_generator_or_default(const std::optional<Generator>& gen, const Generator& default_gen) {
return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen); return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
} }
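The two helpers above validate an optional generator and fall back to a default. A simplified sketch of that get-or-default shape with an assumed `Gen` type; the real `check_generator` additionally verifies the device type and downcasts to the backend implementation pointer:

```
#include <optional>
#include <stdexcept>

struct Gen { bool defined = true; };  // assumed stand-in for at::Generator

const Gen& check_generator(const std::optional<Gen>& gen) {
  if (!gen.has_value()) {
    throw std::runtime_error("Expected Generator but received nullopt");
  }
  if (!gen->defined) {
    throw std::runtime_error("Generator with undefined implementation is not allowed");
  }
  return *gen;
}

const Gen& get_generator_or_default(const std::optional<Gen>& gen,
                                    const Gen& default_gen) {
  // Use the caller's generator when present and defined, else the default one.
  return gen.has_value() && gen->defined ? check_generator(gen) : default_gen;
}
```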

View File

@ -5,8 +5,8 @@ namespace at {
static std::mutex _generator_mutex_lock; static std::mutex _generator_mutex_lock;
c10::optional<GeneratorFuncType>& GetGeneratorPrivate() { std::optional<GeneratorFuncType>& GetGeneratorPrivate() {
static c10::optional<GeneratorFuncType> generator_privateuse1 = c10::nullopt; static std::optional<GeneratorFuncType> generator_privateuse1 = c10::nullopt;
return generator_privateuse1; return generator_privateuse1;
} }

View File

@ -7,7 +7,7 @@ namespace at {
using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>; using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>;
c10::optional<GeneratorFuncType>& GetGeneratorPrivate(); std::optional<GeneratorFuncType>& GetGeneratorPrivate();
class TORCH_API _GeneratorRegister { class TORCH_API _GeneratorRegister {
public: public:

View File

@ -58,10 +58,10 @@ struct ListElementConstReferenceTraits {
using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return<T>::type; using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return<T>::type;
}; };
// There is no to() overload for c10::optional<std::string>. // There is no to() overload for std::optional<std::string>.
template<> template<>
struct ListElementConstReferenceTraits<c10::optional<std::string>> { struct ListElementConstReferenceTraits<std::optional<std::string>> {
using const_reference = c10::optional<std::reference_wrapper<const std::string>>; using const_reference = std::optional<std::reference_wrapper<const std::string>>;
}; };
template<class T, class Iterator> template<class T, class Iterator>

View File

@ -168,8 +168,8 @@ list_element_to_const_ref(const IValue& element) {
} }
template<> template<>
inline typename ListElementConstReferenceTraits<c10::optional<std::string>>::const_reference inline typename ListElementConstReferenceTraits<std::optional<std::string>>::const_reference
list_element_to_const_ref<c10::optional<std::string>>(const IValue& element) { list_element_to_const_ref<std::optional<std::string>>(const IValue& element) {
return element.toOptionalStringRef(); return element.toOptionalStringRef();
} }
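Because there is no `to()` overload for `std::optional<std::string>`, the list exposes such elements as `std::optional<std::reference_wrapper<const std::string>>`. A standalone sketch of that const-reference access pattern over a plain vector; `element_as_const_ref` is an assumed helper name:

```
#include <cstddef>
#include <functional>
#include <optional>
#include <string>
#include <vector>

using OptStrRef = std::optional<std::reference_wrapper<const std::string>>;

// Expose an optional element by const reference, never by copy.
OptStrRef element_as_const_ref(
    const std::vector<std::optional<std::string>>& list, std::size_t i) {
  const auto& elem = list[i];
  if (elem.has_value()) {
    return std::cref(*elem);  // borrow the stored string
  }
  return std::nullopt;        // element was nullopt: nothing to reference
}
```

The test hunk further down exercises exactly this: indexing the const list yields the reference-wrapper optional, while assigning into a `std::optional<std::string>` makes a copy.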

View File

@ -1127,13 +1127,13 @@ TEST(ListTest, canAccessStringByReference) {
} }
TEST(ListTest, canAccessOptionalStringByReference) { TEST(ListTest, canAccessOptionalStringByReference) {
List<c10::optional<std::string>> list({"one", "two", c10::nullopt}); List<std::optional<std::string>> list({"one", "two", c10::nullopt});
const auto& listRef = list; const auto& listRef = list;
static_assert( static_assert(
std::is_same_v<decltype(listRef[1]), c10::optional<std::reference_wrapper<const std::string>>>, std::is_same_v<decltype(listRef[1]), std::optional<std::reference_wrapper<const std::string>>>,
"List<c10::optional<std::string>> access should be by const reference"); "List<std::optional<std::string>> access should be by const reference");
c10::optional<std::string> str1 = list[1]; std::optional<std::string> str1 = list[1];
c10::optional<std::string> str2 = list[2]; std::optional<std::string> str2 = list[2];
decltype(auto) strRef1 = listRef[1]; decltype(auto) strRef1 = listRef[1];
decltype(auto) strRef2 = listRef[2]; decltype(auto) strRef2 = listRef[2];
// NOLINTNEXTLINE(bugprone-unchecked-optional-access) // NOLINTNEXTLINE(bugprone-unchecked-optional-access)

View File

@ -100,7 +100,7 @@ void check_names_valid_for(const TensorBase& tensor, DimnameList names);
void check_names_valid_for(size_t tensor_dim, DimnameList names); void check_names_valid_for(size_t tensor_dim, DimnameList names);
// Sets the names of `tensor` to be `names`. // Sets the names of `tensor` to be `names`.
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names); TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::optional<DimnameList> names);
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names); TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);
constexpr size_t kMaxNamedTensorDim = 64; constexpr size_t kMaxNamedTensorDim = 64;
@ -111,7 +111,7 @@ namespace impl {
// Some helper functions on TensorImpl. Useful for working with names in TH. // Some helper functions on TensorImpl. Useful for working with names in TH.
// XXX: Ideally these would exist as methods on TensorImpl // XXX: Ideally these would exist as methods on TensorImpl
TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names); TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::optional<DimnameList> names, bool validate_names);
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names); TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
void check_names_valid_for(TensorImpl* impl, DimnameList names); void check_names_valid_for(TensorImpl* impl, DimnameList names);
@ -132,7 +132,7 @@ TORCH_API DimnameList get_names(const TensorImpl* impl);
// Returns the names of the tensor if they have been allocated; returns nullopt // Returns the names of the tensor if they have been allocated; returns nullopt
// instead if they haven't been. The names of a tensor are not allocated if a // instead if they haven't been. The names of a tensor are not allocated if a
// tensor is constructed with names=None. // tensor is constructed with names=None.
TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl); TORCH_API std::optional<DimnameList> get_opt_names(const TensorImpl* impl);
} // namespace impl } // namespace impl

View File

@ -7,7 +7,7 @@ namespace c10 {
namespace { namespace {
bool _eq(const char* op, c10::SymNodeImpl* lhs, c10::SymNodeImpl* rhs) { bool _eq(const char* op, c10::SymNodeImpl* lhs, c10::SymNodeImpl* rhs) {
TORCH_INTERNAL_ASSERT(lhs->is_nested_int()); TORCH_INTERNAL_ASSERT(lhs->is_nested_int());
c10::optional<int64_t> c = rhs->nested_int(); std::optional<int64_t> c = rhs->nested_int();
return ( return (
c.has_value() && lhs->nested_int() == *c && c.has_value() && lhs->nested_int() == *c &&
lhs->nested_int_coeff() == rhs->nested_int_coeff()); lhs->nested_int_coeff() == rhs->nested_int_coeff());
@ -68,7 +68,7 @@ c10::SymNode NestedIntSymNodeImpl::le(const c10::SymNode& other) {
c10::SymNode NestedIntSymNodeImpl::mul(const c10::SymNode& other) { c10::SymNode NestedIntSymNodeImpl::mul(const c10::SymNode& other) {
TORCH_CHECK(!other->nested_int(), "nested int cannot be multiplied by nested int"); TORCH_CHECK(!other->nested_int(), "nested int cannot be multiplied by nested int");
c10::optional<int64_t> c = other->constant_int(); std::optional<int64_t> c = other->constant_int();
TORCH_CHECK(c.has_value()); TORCH_CHECK(c.has_value());
return SymNode(c10::make_intrusive<NestedIntSymNodeImpl>(val_, coeff_ * *c)); return SymNode(c10::make_intrusive<NestedIntSymNodeImpl>(val_, coeff_ * *c));
} }

View File

@ -134,11 +134,11 @@ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
c10::SymNode le(const c10::SymNode& other) override; c10::SymNode le(const c10::SymNode& other) override;
c10::SymNode mul(const c10::SymNode& other) override; c10::SymNode mul(const c10::SymNode& other) override;
c10::optional<int64_t> nested_int() override { std::optional<int64_t> nested_int() override {
return val_; return val_;
} }
c10::optional<int64_t> nested_int_coeff() override { std::optional<int64_t> nested_int_coeff() override {
return coeff_; return coeff_;
} }

View File

@ -14,7 +14,7 @@ namespace {
// To achieve this, we ensure that the tls is empty by default and emptied again both when // To achieve this, we ensure that the tls is empty by default and emptied again both when
// we call into user torch_dispatch or returning back to python after this call. // we call into user torch_dispatch or returning back to python after this call.
thread_local c10::optional<c10::impl::LocalDispatchKeySet> tls_on_entry; thread_local std::optional<c10::impl::LocalDispatchKeySet> tls_on_entry;
c10::impl::LocalDispatchKeySet safe_get_tls_on_entry() { c10::impl::LocalDispatchKeySet safe_get_tls_on_entry() {
TORCH_CHECK(tls_on_entry.has_value(), "Accessing torch dispatch state outside of '__torch_dispatch__' " TORCH_CHECK(tls_on_entry.has_value(), "Accessing torch dispatch state outside of '__torch_dispatch__' "
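The thread-local slot above is deliberately empty by default, and the checked accessor turns out-of-context use into a loud error. A minimal sketch of that guard with an `int` payload instead of `LocalDispatchKeySet` (the message text is an assumption):

```
#include <optional>
#include <stdexcept>

// Empty by default; only populated for the duration of a dispatch call.
thread_local std::optional<int> tls_on_entry;

int safe_get_tls_on_entry() {
  if (!tls_on_entry.has_value()) {
    throw std::runtime_error(
        "Accessing dispatch state outside of __torch_dispatch__ is not allowed");
  }
  return *tls_on_entry;
}
```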

View File

@ -42,7 +42,7 @@ TensorBase TensorBase::to(
at::TensorOptions options, at::TensorOptions options,
bool non_blocking, bool non_blocking,
bool copy, bool copy,
c10::optional<at::MemoryFormat> memory_format) const { std::optional<at::MemoryFormat> memory_format) const {
Tensor self(*this); Tensor self(*this);
return at::_ops::to_dtype_layout::call( return at::_ops::to_dtype_layout::call(
self, optTypeMetaToScalarType(options.dtype_opt()), self, optTypeMetaToScalarType(options.dtype_opt()),
@ -134,8 +134,8 @@ bool TensorBase::retains_grad() const {
} }
void Tensor::_backward(TensorList inputs, void Tensor::_backward(TensorList inputs,
const c10::optional<Tensor>& gradient, const std::optional<Tensor>& gradient,
c10::optional<bool> keep_graph, std::optional<bool> keep_graph,
bool create_graph) const { bool create_graph) const {
return impl::GetVariableHooks()->_backward(*this, inputs, gradient, keep_graph, create_graph); return impl::GetVariableHooks()->_backward(*this, inputs, gradient, keep_graph, create_graph);
} }

View File

@ -147,7 +147,7 @@ class TORCH_API TensorBase {
const TensorBase& fill_(const c10::Scalar& scalar) const; const TensorBase& fill_(const c10::Scalar& scalar) const;
const TensorBase& zero_() const; const TensorBase& zero_() const;
TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const; TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, std::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
bool is_complex() const { bool is_complex() const {
return at::isComplexType(this->scalar_type()); return at::isComplexType(this->scalar_type());
@ -249,7 +249,7 @@ class TORCH_API TensorBase {
return impl_->strides(); return impl_->strides();
} }
// See impl::get_opt_names in ATen/NamedTensor.h for docs. // See impl::get_opt_names in ATen/NamedTensor.h for docs.
c10::optional<DimnameList> opt_names() const { std::optional<DimnameList> opt_names() const {
return impl::get_opt_names(unsafeGetTensorImpl()); return impl::get_opt_names(unsafeGetTensorImpl());
} }
// See impl::get_names in ATen/NamedTensor.h for docs. // See impl::get_names in ATen/NamedTensor.h for docs.
@ -712,7 +712,7 @@ class TORCH_API TensorBase {
/// // f requires grad, has no operation creating it /// // f requires grad, has no operation creating it
/// @endcode /// @endcode
/// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const; /// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
/// ///
/// Computes the gradient of current tensor with respect to graph leaves. /// Computes the gradient of current tensor with respect to graph leaves.
/// ///
@ -1010,7 +1010,7 @@ struct ExclusivelyOwnedTraits<at::TensorBase> : public c10::ExclusivelyOwnedTens
namespace at { namespace at {
inline c10::MaybeOwned<TensorBase> borrow_from_optional_tensor( inline c10::MaybeOwned<TensorBase> borrow_from_optional_tensor(
const c10::optional<TensorBase>& opt) { const std::optional<TensorBase>& opt) {
return opt.has_value() return opt.has_value()
? c10::MaybeOwned<TensorBase>::borrowed(*opt) ? c10::MaybeOwned<TensorBase>::borrowed(*opt)
: c10::MaybeOwned<TensorBase>::owned(std::in_place); : c10::MaybeOwned<TensorBase>::owned(std::in_place);

View File

@ -17,7 +17,7 @@ bool tensorlist_has_dispatch(at::ITensorListRef li) {
return false; return false;
} }
bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li) { bool tensorlist_has_dispatch(const c10::List<std::optional<at::Tensor>>& li) {
for (auto i : c10::irange(li.size())) { for (auto i : c10::irange(li.size())) {
auto t = li.get(i); auto t = li.get(i);
if (t && tensor_has_dispatch(*t)) { if (t && tensor_has_dispatch(*t)) {

View File

@ -10,7 +10,7 @@ namespace at::impl {
TORCH_API bool tensor_has_dispatch(const at::Tensor& t); TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li); TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
TORCH_API bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li); TORCH_API bool tensorlist_has_dispatch(const c10::List<std::optional<at::Tensor>>& li);
using c10::impl::dispatch_mode_enabled; using c10::impl::dispatch_mode_enabled;
} }

View File

@ -60,8 +60,8 @@ struct TORCH_API VariableHooksInterface {
virtual void _backward( virtual void _backward(
const Tensor&, const Tensor&,
TensorList, TensorList,
const c10::optional<Tensor>&, const std::optional<Tensor>&,
c10::optional<bool>, std::optional<bool>,
bool) const = 0; bool) const = 0;
virtual void requires_grad_(const TensorBase&, bool) const = 0; virtual void requires_grad_(const TensorBase&, bool) const = 0;
virtual void basic_autograd_not_implemented_fallback( virtual void basic_autograd_not_implemented_fallback(

View File

@ -22,7 +22,7 @@ using has_symint =
std::is_same<c10::SymInt, T>, std::is_same<c10::SymInt, T>,
std::is_same<c10::SymIntArrayRef, T>, std::is_same<c10::SymIntArrayRef, T>,
std::is_same<at::OptionalSymIntArrayRef, T>, std::is_same<at::OptionalSymIntArrayRef, T>,
std::is_same<c10::optional<c10::SymInt>, T> std::is_same<std::optional<c10::SymInt>, T>
>; >;
template <typename T> template <typename T>
@ -46,8 +46,8 @@ struct remove_symint<c10::SymIntArrayRef> {
}; };
template <> template <>
struct remove_symint<c10::optional<c10::SymInt>> { struct remove_symint<std::optional<c10::SymInt>> {
using type = c10::optional<int64_t>; using type = std::optional<int64_t>;
}; };

View File

@ -71,7 +71,7 @@ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIn
} }
template <> template <>
inline typename remove_symint<c10::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) { inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt; return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt;
} }
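The trait plus `unpackSymInt` specialization above map `std::optional<SymInt>` to `std::optional<int64_t>`, applying the conversion only when a value is present. A self-contained sketch with a stand-in `SymInt`; the real `guard_int` takes file/line arguments:

```
#include <cstdint>
#include <optional>

struct SymInt {
  std::int64_t v;
  std::int64_t guard_int() const { return v; }  // stand-in for guard_int(__FILE__, __LINE__)
};

template <class T> struct remove_symint { using type = T; };
template <> struct remove_symint<SymInt> { using type = std::int64_t; };
template <> struct remove_symint<std::optional<SymInt>> {
  using type = std::optional<std::int64_t>;
};

inline remove_symint<std::optional<SymInt>>::type unpackSymInt(std::optional<SymInt> x) {
  // Convert only when a value is present; otherwise propagate the empty state.
  return x.has_value() ? std::make_optional(x->guard_int()) : std::nullopt;
}
```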

View File

@ -6,7 +6,7 @@
using std::vector; using std::vector;
using std::tuple; using std::tuple;
using c10::optional; using std::optional;
using c10::IValue; using c10::IValue;
using c10::OperatorKernel; using c10::OperatorKernel;
using c10::OperatorHandle; using c10::OperatorHandle;

View File

@ -207,15 +207,15 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithIntListOu
EXPECT_EQ(6, result[0].toIntVector()[2]); EXPECT_EQ(6, result[0].toIntVector()[2]);
} }
std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) { std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
Dict<string, Tensor> dict; Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA)); dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>( return std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA), dummyTensor(DispatchKey::CUDA),
5, 5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}, {dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(std::in_place, 0), std::optional<int64_t>(std::in_place, 0),
dict dict
); );
} }
@ -808,11 +808,11 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenFallbackKernelWitho
EXPECT_EQ(4, outputs[0].toInt()); EXPECT_EQ(4, outputs[0].toInt());
} }
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -846,7 +846,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI
EXPECT_FALSE(called_arg4.has_value()); EXPECT_FALSE(called_arg4.has_value());
} }
c10::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -883,8 +883,8 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI
EXPECT_FALSE(called_arg4.has_value()); EXPECT_FALSE(called_arg4.has_value());
} }
std::tuple<c10::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>> std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4); return std::make_tuple(arg2, arg3, arg4);
} }
@ -936,7 +936,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernel_whenRegister
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value()); ASSERT_TRUE(op.has_value());
c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value()); EXPECT_FALSE(differences.has_value());
} }

View File

@ -223,15 +223,15 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithIntListOutput_w
EXPECT_EQ(6, result[0].toIntVector()[2]); EXPECT_EQ(6, result[0].toIntVector()[2]);
} }
std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) { std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
Dict<string, Tensor> dict; Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA)); dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>( return std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA), dummyTensor(DispatchKey::CUDA),
5, 5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(std::in_place, 0), std::optional<int64_t>(std::in_place, 0),
dict dict
); );
} }
@ -550,11 +550,11 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenFallbackKernelWithoutTens
EXPECT_EQ(4, outputs[0].toInt()); EXPECT_EQ(4, outputs[0].toInt());
} }
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_
EXPECT_FALSE(called_arg4.has_value()); EXPECT_FALSE(called_arg4.has_value());
} }
c10::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -625,8 +625,8 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_
EXPECT_FALSE(called_arg4.has_value()); EXPECT_FALSE(called_arg4.has_value());
} }
std::tuple<c10::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>> std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4); return std::make_tuple(arg2, arg3, arg4);
} }
@ -690,7 +690,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernel_whenRegisteredWith
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value()); ASSERT_TRUE(op.has_value());
c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value()); EXPECT_FALSE(differences.has_value());
} }

View File

@ -188,15 +188,15 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithIntListOutp
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators() auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> { .op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> {
Dict<string, Tensor> dict; Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA)); dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>( return std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA), dummyTensor(DispatchKey::CUDA),
5, 5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}, {dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(std::in_place, 0), std::optional<int64_t>(std::in_place, 0),
dict dict
); );
}); });
@ -733,13 +733,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenFallbackKernelWithout
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables) // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called; bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
auto registrar = RegisterOperators().op( auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
[&] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { [&] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -773,13 +773,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables) // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called; bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
auto registrar = RegisterOperators().op( auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?", "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
[&] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { [&] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -816,13 +816,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables) // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called; bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
auto registrar = RegisterOperators().op( auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
[] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4); return std::make_tuple(arg2, arg3, arg4);
}); });
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -866,7 +866,7 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernel_whenRegistered
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value()); ASSERT_TRUE(op.has_value());
c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value()); EXPECT_FALSE(differences.has_value());
} }

View File

@ -187,15 +187,15 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithIntListOutput_whe
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators() auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", .op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor) -> std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> { RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor) -> std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> {
Dict<string, Tensor> dict; Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA)); dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>( return std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA), dummyTensor(DispatchKey::CUDA),
5, 5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(std::in_place, 0), std::optional<int64_t>(std::in_place, 0),
dict dict
); );
})); }));
@ -466,14 +466,14 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenFallbackKernelWithoutTensor
EXPECT_EQ(4, outputs[0].toInt()); EXPECT_EQ(4, outputs[0].toInt());
} }
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op( auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -507,7 +507,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op( auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?", "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -544,7 +544,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) { TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op( auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4); return std::make_tuple(arg2, arg3, arg4);
})); }));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernel_whenRegisteredWithou
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value()); ASSERT_TRUE(op.has_value());
c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value()); EXPECT_FALSE(differences.has_value());
} }

View File

@ -116,7 +116,7 @@ namespace impl {
}; };
template<class T, bool AllowDeprecatedTypes> template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<c10::optional<T>, AllowDeprecatedTypes> struct assert_is_valid_input_type<std::optional<T>, AllowDeprecatedTypes>
: assert_is_valid_input_type<T, AllowDeprecatedTypes> {}; : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
template <bool AllowDeprecatedTypes, class... Args> template <bool AllowDeprecatedTypes, class... Args>
@ -226,7 +226,7 @@ namespace impl {
}; };
template<class T, bool AllowDeprecatedTypes> template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<c10::optional<T>, AllowDeprecatedTypes> struct assert_is_valid_output_type<std::optional<T>, AllowDeprecatedTypes>
: assert_is_valid_output_type<T, AllowDeprecatedTypes> {}; : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
template<class T, bool AllowDeprecatedTypes> template<class T, bool AllowDeprecatedTypes>
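The specializations above validate `std::optional<T>` by recursing into `T`. A small sketch of that unwrap-and-defer trait shape, with an assumed `is_valid_input` predicate standing in for the kernel-function machinery:

```
#include <optional>
#include <string>
#include <type_traits>

// Assumed predicate in place of assert_is_valid_input_type.
template <class T>
struct is_valid_input
    : std::bool_constant<std::is_arithmetic_v<T> || std::is_same_v<T, std::string>> {};

// Unwrap one level of optional and defer the decision to the inner type.
template <class T>
struct is_valid_input<std::optional<T>> : is_valid_input<T> {};

static_assert(is_valid_input<std::optional<int>>::value);
static_assert(!is_valid_input<std::optional<void*>>::value);
```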

View File

@ -205,15 +205,15 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithIntListOutput_wh
} }
struct KernelWithMultipleOutputs final : OperatorKernel { struct KernelWithMultipleOutputs final : OperatorKernel {
std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> operator()(Tensor) { std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> operator()(Tensor) {
Dict<string, Tensor> dict; Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA)); dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>( return std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA), dummyTensor(DispatchKey::CUDA),
5, 5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(std::in_place, 0), std::optional<int64_t>(std::in_place, 0),
dict dict
); );
} }
@ -679,12 +679,12 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenFallbackKernelWithoutTenso
EXPECT_EQ(4, outputs[0].toInt()); EXPECT_EQ(4, outputs[0].toInt());
} }
c10::optional<Tensor> called_arg2 = c10::nullopt; std::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt; std::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt; std::optional<std::string> called_arg4 = c10::nullopt;
struct KernelWithOptInputWithoutOutput final : OperatorKernel { struct KernelWithOptInputWithoutOutput final : OperatorKernel {
void operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { void operator()(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -720,7 +720,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w
} }
struct KernelWithOptInputWithOutput final : OperatorKernel { struct KernelWithOptInputWithOutput final : OperatorKernel {
c10::optional<Tensor> operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { std::optional<Tensor> operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true; called = true;
called_arg2 = arg2; called_arg2 = arg2;
called_arg3 = arg3; called_arg3 = arg3;
@ -759,8 +759,8 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w
} }
struct KernelWithOptInputWithMultipleOutputs final : OperatorKernel { struct KernelWithOptInputWithMultipleOutputs final : OperatorKernel {
std::tuple<c10::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>> std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) { operator()(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4); return std::make_tuple(arg2, arg3, arg4);
} }
}; };
@ -821,7 +821,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegisteredWitho
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value()); ASSERT_TRUE(op.has_value());
c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value()); EXPECT_FALSE(differences.has_value());
} }
@ -832,7 +832,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegisteredCatch
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value()); ASSERT_TRUE(op.has_value());
c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value()); EXPECT_FALSE(differences.has_value());
} }

View File

@ -63,7 +63,7 @@ struct BuiltinOpFunction : public Function {
bool call( bool call(
Stack& stack, Stack& stack,
c10::optional<size_t>, std::optional<size_t>,
c10::function_ref<void(const Code&)>) override { c10::function_ref<void(const Code&)>) override {
run(stack); run(stack);
return false; return false;

View File

@ -469,7 +469,7 @@ bool ClassType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const {
} }
ClassTypePtr ClassType::create( ClassTypePtr ClassType::create(
c10::optional<QualifiedName> qualifiedName, std::optional<QualifiedName> qualifiedName,
std::weak_ptr<CompilationUnit> cu, std::weak_ptr<CompilationUnit> cu,
bool is_module, bool is_module,
std::string doc_string, std::string doc_string,
@ -483,7 +483,7 @@ ClassTypePtr ClassType::create(
} }
ClassType::ClassType( ClassType::ClassType(
c10::optional<QualifiedName> name, std::optional<QualifiedName> name,
std::weak_ptr<CompilationUnit> cu, std::weak_ptr<CompilationUnit> cu,
bool is_module, bool is_module,
std::string doc_string, std::string doc_string,
@ -620,7 +620,7 @@ IValue ClassType::getConstant(size_t slot) const {
return constantValues_[slot]; return constantValues_[slot];
} }
c10::optional<IValue> ClassType::findConstant(const std::string& name) const { std::optional<IValue> ClassType::findConstant(const std::string& name) const {
TORCH_INTERNAL_ASSERT(constantNames_.size() == constantValues_.size()); TORCH_INTERNAL_ASSERT(constantNames_.size() == constantValues_.size());
size_t pos = 0; size_t pos = 0;
for (const auto& c : constantNames_) { for (const auto& c : constantNames_) {
@ -652,7 +652,7 @@ std::shared_ptr<const CompilationUnit> ClassType::compilation_unit() const {
return cu; return cu;
} }
c10::optional<ClassType::Property> ClassType::getProperty(const std::string& name) { std::optional<ClassType::Property> ClassType::getProperty(const std::string& name) {
for (auto& prop : properties_) { for (auto& prop : properties_) {
if (name == prop.name) { if (name == prop.name) {
return prop; return prop;
@ -667,7 +667,7 @@ void ClassType::addProperty(const std::string& name, torch::jit::Function* gette
properties_.push_back({name, getter, setter}); properties_.push_back({name, getter, setter});
} }
c10::optional<size_t> ClassType::findConstantSlot(const std::string& name) const { std::optional<size_t> ClassType::findConstantSlot(const std::string& name) const {
TORCH_CHECK(constantNames_.size() == constantValues_.size()); TORCH_CHECK(constantNames_.size() == constantValues_.size());
size_t slot = 0; size_t slot = 0;
for (const auto& constant : constantNames_) { for (const auto& constant : constantNames_) {
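`findConstantSlot` above (and `findAttributeSlot` in the header hunks that follow) is a linear scan over parallel name/value vectors that reports the position as `std::optional<size_t>` rather than a sentinel index. A standalone sketch of that shape as an assumed free function:

```
#include <cstddef>
#include <optional>
#include <string>
#include <vector>

std::optional<std::size_t> find_slot(const std::vector<std::string>& names,
                                     const std::string& wanted) {
  std::size_t slot = 0;
  for (const auto& name : names) {
    if (name == wanted) {
      return slot;  // found: report the position rather than a sentinel value
    }
    slot++;
  }
  return std::nullopt;  // not found
}
```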

View File

@ -74,7 +74,7 @@ struct TORCH_API ClassType : public NamedType {
// Create a class type with name `name` and its methods stored in `cu`. // Create a class type with name `name` and its methods stored in `cu`.
static ClassTypePtr create( static ClassTypePtr create(
c10::optional<QualifiedName> qualifiedName, std::optional<QualifiedName> qualifiedName,
std::weak_ptr<CompilationUnit> cu, std::weak_ptr<CompilationUnit> cu,
bool is_module = false, bool is_module = false,
std::string doc_string = "", std::string doc_string = "",
@ -152,7 +152,7 @@ struct TORCH_API ClassType : public NamedType {
// Attributes are stored in a specific slot at runtime for efficiency. // Attributes are stored in a specific slot at runtime for efficiency.
// When emitting instructions we specify the slot so that attribute access is // When emitting instructions we specify the slot so that attribute access is
// a constant lookup // a constant lookup
c10::optional<size_t> findAttributeSlot(const std::string& name) const { std::optional<size_t> findAttributeSlot(const std::string& name) const {
size_t slot = 0; size_t slot = 0;
for (const auto& attr : attributes_) { for (const auto& attr : attributes_) {
if (name == attr.getName()) { if (name == attr.getName()) {
@ -239,7 +239,7 @@ struct TORCH_API ClassType : public NamedType {
} }
// Get the property with the given \p name, if it exists on the class. // Get the property with the given \p name, if it exists on the class.
c10::optional<ClassType::Property> getProperty(const std::string& name); std::optional<ClassType::Property> getProperty(const std::string& name);
// Add a property named \p name with \p getter and \p setter as its getter and setter. // Add a property named \p name with \p getter and \p setter as its getter and setter.
void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter); void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter);
// Get a list of all properties. // Get a list of all properties.
@ -257,7 +257,7 @@ struct TORCH_API ClassType : public NamedType {
size_t addConstant(const std::string& name, const IValue& value); size_t addConstant(const std::string& name, const IValue& value);
c10::optional<size_t> findConstantSlot(const std::string& name) const; std::optional<size_t> findConstantSlot(const std::string& name) const;
size_t getConstantSlot(const std::string& name) const { size_t getConstantSlot(const std::string& name) const {
if (auto r = findConstantSlot(name)) { if (auto r = findConstantSlot(name)) {
@ -281,7 +281,7 @@ struct TORCH_API ClassType : public NamedType {
IValue getConstant(size_t slot) const; IValue getConstant(size_t slot) const;
c10::optional<IValue> findConstant(const std::string& name) const; std::optional<IValue> findConstant(const std::string& name) const;
size_t numConstants() const; size_t numConstants() const;
@ -384,7 +384,7 @@ struct TORCH_API ClassType : public NamedType {
private: private:
ClassType( ClassType(
c10::optional<QualifiedName> name, std::optional<QualifiedName> name,
std::weak_ptr<CompilationUnit> cu, std::weak_ptr<CompilationUnit> cu,
bool is_module = false, bool is_module = false,
std::string doc_string = "", std::string doc_string = "",


@ -56,7 +56,7 @@ namespace detail {
void operator()(const at::Tensor& x) { void operator()(const at::Tensor& x) {
ts = ts | x.key_set(); ts = ts | x.key_set();
} }
void operator()(const c10::optional<at::Tensor>& x) { void operator()(const std::optional<at::Tensor>& x) {
if (x.has_value()) { if (x.has_value()) {
ts = ts | x->key_set(); ts = ts | x->key_set();
} }
@ -67,8 +67,8 @@ namespace detail {
} }
} }
// Tensor?[] translates to this case. // Tensor?[] translates to this case.
void operator()(const c10::List<c10::optional<at::Tensor>>& xs) { void operator()(const c10::List<std::optional<at::Tensor>>& xs) {
for (c10::optional<at::Tensor> x : xs) { for (std::optional<at::Tensor> x : xs) {
if (x.has_value()) { if (x.has_value()) {
ts = ts | x.value().key_set(); ts = ts | x.value().key_set();
} }
@ -80,7 +80,7 @@ namespace detail {
ts = ts | x.key_set(); ts = ts | x.key_set();
} }
} }
[[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) { [[noreturn]] void operator()(at::ArrayRef<std::optional<at::Tensor>>) {
// Just checking that the handling of Tensor?[] didn't change. // Just checking that the handling of Tensor?[] didn't change.
TORCH_INTERNAL_ASSERT(false); TORCH_INTERNAL_ASSERT(false);
} }
@ -89,7 +89,7 @@ namespace detail {
ts = ts | gen.key_set(); ts = ts | gen.key_set();
} }
} }
void operator()(const c10::optional<at::Generator>& gen) { void operator()(const std::optional<at::Generator>& gen) {
if (gen.has_value() && gen->defined()) { if (gen.has_value() && gen->defined()) {
ts = ts | gen->key_set(); ts = ts | gen->key_set();
} }


@ -76,8 +76,8 @@ C10_EXPORT Dispatcher& Dispatcher::realSingleton() {
return _singleton; return _singleton;
} }
c10::optional<OperatorHandle> Dispatcher::findOp(const OperatorName& overload_name) { std::optional<OperatorHandle> Dispatcher::findOp(const OperatorName& overload_name) {
return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> c10::optional<OperatorHandle> { return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> std::optional<OperatorHandle> {
auto found = operatorLookupTable.find(overload_name); auto found = operatorLookupTable.find(overload_name);
if (found == operatorLookupTable.end()) { if (found == operatorLookupTable.end()) {
return c10::nullopt; return c10::nullopt;
@ -103,7 +103,7 @@ void Dispatcher::waitForDef(const FunctionSchema& schema) {
"the same dependencies."); "the same dependencies.");
} }
void Dispatcher::waitForImpl(const OperatorName& op_name, c10::optional<c10::DispatchKey> maybe_dk) { void Dispatcher::waitForImpl(const OperatorName& op_name, std::optional<c10::DispatchKey> maybe_dk) {
using namespace std::chrono_literals; using namespace std::chrono_literals;
std::unique_lock<std::mutex> lock(guard_->mutex); std::unique_lock<std::mutex> lock(guard_->mutex);
auto dk = maybe_dk.value_or(DispatchKey::CompositeImplicitAutograd); auto dk = maybe_dk.value_or(DispatchKey::CompositeImplicitAutograd);
@ -121,7 +121,7 @@ void Dispatcher::waitForImpl(const OperatorName& op_name, c10::optional<c10::Dis
"the same dependencies."); "the same dependencies.");
} }
c10::optional<OperatorHandle> Dispatcher::findSchema(const OperatorName& overload_name) { std::optional<OperatorHandle> Dispatcher::findSchema(const OperatorName& overload_name) {
auto it = findOp(overload_name); auto it = findOp(overload_name);
if (it.has_value()) { if (it.has_value()) {
if (it->hasSchema()) { if (it->hasSchema()) {
@ -275,7 +275,7 @@ PythonModuleMapType& pythonModulesSingleton() {
} }
c10::optional<std::pair<const char*, const char*>> Dispatcher::getPyStub(OperatorName op_name) { std::optional<std::pair<const char*, const char*>> Dispatcher::getPyStub(OperatorName op_name) {
std::lock_guard<std::mutex> lock(guard_->mutex); std::lock_guard<std::mutex> lock(guard_->mutex);
auto found = pythonModulesSingleton().find(op_name); auto found = pythonModulesSingleton().find(op_name);
if (found == pythonModulesSingleton().end()) { if (found == pythonModulesSingleton().end()) {
@ -332,9 +332,9 @@ void Dispatcher::throwIfHasPythonModule(OperatorName op_name) {
RegistrationHandleRAII Dispatcher::registerImpl( RegistrationHandleRAII Dispatcher::registerImpl(
OperatorName op_name, OperatorName op_name,
c10::optional<DispatchKey> dispatch_key, std::optional<DispatchKey> dispatch_key,
KernelFunction kernel, KernelFunction kernel,
c10::optional<impl::CppSignature> cpp_signature, std::optional<impl::CppSignature> cpp_signature,
std::unique_ptr<FunctionSchema> inferred_function_schema, std::unique_ptr<FunctionSchema> inferred_function_schema,
std::string debug std::string debug
) { ) {
@ -364,7 +364,7 @@ RegistrationHandleRAII Dispatcher::registerImpl(
}); });
} }
void Dispatcher::deregisterImpl_(const OperatorHandle& op, const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator handle) { void Dispatcher::deregisterImpl_(const OperatorHandle& op, const OperatorName& op_name, std::optional<DispatchKey> dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator handle) {
op.operatorDef_->op.deregisterKernel_(*this, dispatch_key, handle); op.operatorDef_->op.deregisterKernel_(*this, dispatch_key, handle);
TORCH_INTERNAL_ASSERT(op.operator_name() == op_name); TORCH_INTERNAL_ASSERT(op.operator_name() == op_name);
@ -486,7 +486,7 @@ std::vector<OperatorHandle> Dispatcher::findDanglingImpls() const {
}); });
} }
std::vector<OperatorName> Dispatcher::getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const { std::vector<OperatorName> Dispatcher::getRegistrationsForDispatchKey(std::optional<DispatchKey> k) const {
return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> std::vector<OperatorName> { return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> std::vector<OperatorName> {
std::vector<OperatorName> op_names; std::vector<OperatorName> op_names;
for (const auto& op : operatorLookupTable) { for (const auto& op : operatorLookupTable) {
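
The shape of findOp is a plain table lookup that yields an optional handle. A hypothetical, dependency-free version of it (Handle and the map below are stand-ins, not the real OperatorHandle or lookup table):

```
#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>

struct Handle { int id; };

std::optional<Handle> findOp(
    const std::unordered_map<std::string, Handle>& table,
    const std::string& name) {
  auto found = table.find(name);
  if (found == table.end()) {
    return std::nullopt;  // the real code still writes c10::nullopt, which
                          // keeps compiling if it aliases std::nullopt
  }
  return found->second;
}

int main() {
  std::unordered_map<std::string, Handle> table{{"aten::add", {1}}};
  assert(findOp(table, "aten::add").has_value());
  assert(!findOp(table, "aten::nonexistent").has_value());
}
```
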


@ -137,7 +137,7 @@ public:
* and returns it if it is registered WITH A SCHEMA. * and returns it if it is registered WITH A SCHEMA.
* Returns nullopt otherwise. * Returns nullopt otherwise.
*/ */
c10::optional<OperatorHandle> findSchema(const OperatorName& operator_name); std::optional<OperatorHandle> findSchema(const OperatorName& operator_name);
/** /**
* Variant of findSchema that results in less code generated at the call site. * Variant of findSchema that results in less code generated at the call site.
@ -155,7 +155,7 @@ public:
OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name); OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);
// Like findSchema, but also returns OperatorHandle even if there is no schema // Like findSchema, but also returns OperatorHandle even if there is no schema
c10::optional<OperatorHandle> findOp(const OperatorName& operator_name); std::optional<OperatorHandle> findOp(const OperatorName& operator_name);
// Returns a list of all operator names present in the operatorLookupTable_ // Returns a list of all operator names present in the operatorLookupTable_
const std::vector<OperatorName> getAllOpNames(); const std::vector<OperatorName> getAllOpNames();
@ -196,7 +196,7 @@ public:
// Used by torchdeploy/multipy for multiple interpreters racing. // Used by torchdeploy/multipy for multiple interpreters racing.
void waitForDef(const FunctionSchema& schema); void waitForDef(const FunctionSchema& schema);
void waitForImpl(const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key); void waitForImpl(const OperatorName& op_name, std::optional<DispatchKey> dispatch_key);
// ------------------------------------------------------------------------ // ------------------------------------------------------------------------
// //
@ -221,7 +221,7 @@ public:
*/ */
// NB: steals the inferred function schema, as we may need to hold on to // NB: steals the inferred function schema, as we may need to hold on to
// it for a bit until the real schema turns up // it for a bit until the real schema turns up
RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug); RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
/** /**
* Given an operator, tells the Dispatcher that we have implemented a fake impl * Given an operator, tells the Dispatcher that we have implemented a fake impl
@ -234,7 +234,7 @@ public:
*/ */
void throwIfHasPythonModule(OperatorName op_name); void throwIfHasPythonModule(OperatorName op_name);
c10::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name); std::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name);
/** /**
* Register a new operator by name. * Register a new operator by name.
@ -299,7 +299,7 @@ public:
* Returns the names of all operators with a kernel registered for the specified DispatchKey. * Returns the names of all operators with a kernel registered for the specified DispatchKey.
* If no DispatchKey is specified, it returns all registered operators. * If no DispatchKey is specified, it returns all registered operators.
*/ */
std::vector<OperatorName> getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const; std::vector<OperatorName> getRegistrationsForDispatchKey(std::optional<DispatchKey> k) const;
private: private:
Dispatcher(); Dispatcher();
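
getRegistrationsForDispatchKey above treats a disengaged key as "no filter". A sketch of that optional-parameter idiom with made-up types (Key and Registration are illustrative, not the real dispatcher enums):

```
#include <cassert>
#include <optional>
#include <string>
#include <vector>

enum class Key { CPU, CUDA };

struct Registration { std::string name; Key key; };

std::vector<std::string> registrationsFor(
    const std::vector<Registration>& regs, std::optional<Key> k) {
  std::vector<std::string> names;
  for (const auto& r : regs) {
    if (!k.has_value() || r.key == *k) {
      names.push_back(r.name);
    }
  }
  return names;
}

int main() {
  std::vector<Registration> regs{{"add", Key::CPU}, {"mul", Key::CUDA}};
  assert(registrationsFor(regs, std::nullopt).size() == 2);  // all operators
  assert(registrationsFor(regs, Key::CUDA).size() == 1);     // filtered
}
```
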
@ -321,7 +321,7 @@ private:
void deregisterImpl_( void deregisterImpl_(
const OperatorHandle& op, const OperatorHandle& op,
const OperatorName& op_name, const OperatorName& op_name,
c10::optional<DispatchKey> dispatch_key, std::optional<DispatchKey> dispatch_key,
impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle); impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
void deregisterName_(const OperatorHandle& op, const OperatorName& op_name); void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
void deregisterFallback_(DispatchKey dispatchKey); void deregisterFallback_(DispatchKey dispatchKey);


@ -7,7 +7,7 @@ namespace c10 {
namespace impl { namespace impl {
namespace { namespace {
std::string toString(c10::optional<DispatchKey> k) { std::string toString(std::optional<DispatchKey> k) {
if (k.has_value()) { if (k.has_value()) {
return toString(*k); return toString(*k);
} else { } else {
@ -39,7 +39,7 @@ namespace {
// TODO: figure out if we can just directly save real schema at def time // TODO: figure out if we can just directly save real schema at def time
FunctionSchema from_def = from_def_.cloneWithRealTypes(kernel.isValidSymUnboxed()); FunctionSchema from_def = from_def_.cloneWithRealTypes(kernel.isValidSymUnboxed());
FunctionSchema inferred = inferred_.cloneWithRealTypes(); FunctionSchema inferred = inferred_.cloneWithRealTypes();
c10::optional<std::string> schema_difference = findSchemaDifferences(from_def, inferred); std::optional<std::string> schema_difference = findSchemaDifferences(from_def, inferred);
if (schema_difference.has_value()) { if (schema_difference.has_value()) {
TORCH_CHECK(false, TORCH_CHECK(false,
"Inferred operator schema for a C++ kernel function doesn't match the expected function schema.\n" "Inferred operator schema for a C++ kernel function doesn't match the expected function schema.\n"
@ -101,9 +101,9 @@ void OperatorEntry::deregisterSchema() {
OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel( OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel(
const c10::Dispatcher& dispatcher, const c10::Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key, std::optional<DispatchKey> dispatch_key,
KernelFunction kernel, KernelFunction kernel,
c10::optional<CppSignature> cpp_signature, std::optional<CppSignature> cpp_signature,
std::unique_ptr<FunctionSchema> inferred_function_schema, std::unique_ptr<FunctionSchema> inferred_function_schema,
std::string debug std::string debug
) { ) {
@ -181,7 +181,7 @@ OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel(
void OperatorEntry::deregisterKernel_( void OperatorEntry::deregisterKernel_(
const c10::Dispatcher& dispatcher, const c10::Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key, std::optional<DispatchKey> dispatch_key,
AnnotatedKernelContainerIterator kernel AnnotatedKernelContainerIterator kernel
) { ) {
// Redirect catchAll deregistrations to CompositeImplicitAutograd. // Redirect catchAll deregistrations to CompositeImplicitAutograd.


@ -129,9 +129,9 @@ public:
// Postcondition: caller is responsible for disposing of the kernel // Postcondition: caller is responsible for disposing of the kernel
AnnotatedKernelContainerIterator registerKernel( AnnotatedKernelContainerIterator registerKernel(
const Dispatcher& dispatcher, const Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key, std::optional<DispatchKey> dispatch_key,
KernelFunction kernel, KernelFunction kernel,
c10::optional<CppSignature> cpp_signature, std::optional<CppSignature> cpp_signature,
std::unique_ptr<FunctionSchema> inferred_function_schema, std::unique_ptr<FunctionSchema> inferred_function_schema,
std::string debug std::string debug
); );
@ -139,7 +139,7 @@ public:
// Precondition: Dispatcher::mutex_ is held // Precondition: Dispatcher::mutex_ is held
void deregisterKernel_( void deregisterKernel_(
const Dispatcher& dispatcher, const Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key, std::optional<DispatchKey> dispatch_key,
AnnotatedKernelContainerIterator kernel AnnotatedKernelContainerIterator kernel
); );
@ -221,7 +221,7 @@ public:
private: private:
OperatorName name_; OperatorName name_;
c10::optional<AnnotatedSchema> schema_; std::optional<AnnotatedSchema> schema_;
#ifndef C10_MOBILE #ifndef C10_MOBILE
std::vector<at::Tag> tags_; std::vector<at::Tag> tags_;
#endif #endif
@ -282,10 +282,10 @@ private:
struct CppSignatureWithDebug { struct CppSignatureWithDebug {
CppSignature signature; CppSignature signature;
std::string debug; std::string debug;
c10::optional<DispatchKey> dispatch_key; std::optional<DispatchKey> dispatch_key;
}; };
c10::optional<CppSignatureWithDebug> cpp_signature_; std::optional<CppSignatureWithDebug> cpp_signature_;
c10::optional<CppSignatureWithDebug> sym_cpp_signature_; std::optional<CppSignatureWithDebug> sym_cpp_signature_;
// A Python custom error handler for OperatorEntry::reportError // A Python custom error handler for OperatorEntry::reportError
std::unique_ptr<c10::SafePyObject> report_error_callback_; std::unique_ptr<c10::SafePyObject> report_error_callback_;


@ -121,7 +121,7 @@ class DynamicType : public SharedType {
* An implementation detail to support NamedTuple. * An implementation detail to support NamedTuple.
*/ */
struct LabeledDynamicType { struct LabeledDynamicType {
c10::optional<std::string> label; std::optional<std::string> label;
DynamicTypePtr ty; DynamicTypePtr ty;
explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {} explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {}
@ -163,7 +163,7 @@ class DynamicType : public SharedType {
Tag tag() const { Tag tag() const {
return tag_; return tag_;
} }
const c10::optional<std::string>& name() const { const std::optional<std::string>& name() const {
return name_; return name_;
} }
const Arguments& arguments() const { const Arguments& arguments() const {
@ -200,7 +200,7 @@ class DynamicType : public SharedType {
} }
Tag tag_; Tag tag_;
c10::optional<std::string> name_; std::optional<std::string> name_;
union { union {
Arguments arguments_; Arguments arguments_;
ClassTypePtr class_; ClassTypePtr class_;


@ -97,7 +97,7 @@ struct TORCH_API Function {
// executor. // executor.
virtual bool call( virtual bool call(
Stack&, Stack&,
c10::optional<size_t>, std::optional<size_t>,
c10::function_ref<void(const Code&)>) { c10::function_ref<void(const Code&)>) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
return false; return false;


@ -30,7 +30,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const {
// NB: keep this in sync with unpackSymInt in KernelFunction_impl.h // NB: keep this in sync with unpackSymInt in KernelFunction_impl.h
if ( if (
*a.real_type() == *getTypePtr<c10::SymInt>() || *a.real_type() == *getTypePtr<c10::SymInt>() ||
*a.real_type() == *getTypePtr<c10::optional<c10::SymInt>>() || *a.real_type() == *getTypePtr<std::optional<c10::SymInt>>() ||
*a.real_type() == *getTypePtr<c10::SymIntArrayRef>() || *a.real_type() == *getTypePtr<c10::SymIntArrayRef>() ||
*a.real_type() == *getTypePtr<at::OptionalSymIntArrayRef>() *a.real_type() == *getTypePtr<at::OptionalSymIntArrayRef>()
) { ) {
@ -53,7 +53,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const {
is_varret()); is_varret());
} }
bool FunctionSchema::canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const { bool FunctionSchema::canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const {
if (!lhs || !rhs) { if (!lhs || !rhs) {
return false; return false;
} }
@ -67,7 +67,7 @@ bool FunctionSchema::canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lh
return false; return false;
} }
c10::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const { std::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const {
if (!aliasTypeSet) { if (!aliasTypeSet) {
return c10::nullopt; return c10::nullopt;
} }
@ -95,7 +95,7 @@ c10::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const
return AliasTypeSet(containedTypes.begin(), containedTypes.end()); return AliasTypeSet(containedTypes.begin(), containedTypes.end());
} }
c10::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr& type) const { std::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr& type) const {
switch(type->kind()) { switch(type->kind()) {
case TypeKind::ListType: case TypeKind::ListType:
case TypeKind::DictType: case TypeKind::DictType:
@ -155,8 +155,8 @@ bool FunctionSchema::may_alias(const SchemaArgument& lhs, const SchemaArgument&
const Argument lhsArg = getCorrectList(lhs.type)[lhs.index]; const Argument lhsArg = getCorrectList(lhs.type)[lhs.index];
const Argument rhsArg = getCorrectList(rhs.type)[rhs.index]; const Argument rhsArg = getCorrectList(rhs.type)[rhs.index];
c10::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type()); std::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type());
c10::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type()); std::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type());
// Check to see if lhs and rhs have the same alias set // Check to see if lhs and rhs have the same alias set
if (canAliasTypeSetsAlias(lhsTypes, rhsTypes)) { if (canAliasTypeSetsAlias(lhsTypes, rhsTypes)) {
@ -182,10 +182,10 @@ bool FunctionSchema::may_contain_alias(const SchemaArgument& lhs, const SchemaAr
const c10::Argument lhsArg = getCorrectList(lhs.type)[lhs.index]; const c10::Argument lhsArg = getCorrectList(lhs.type)[lhs.index];
const c10::Argument rhsArg = getCorrectList(rhs.type)[rhs.index]; const c10::Argument rhsArg = getCorrectList(rhs.type)[rhs.index];
c10::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type()); std::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type());
c10::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type()); std::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type());
c10::optional<AliasTypeSet> lhsContainedTypes = getAliasTypeSetContainedTypes(lhsTypes); std::optional<AliasTypeSet> lhsContainedTypes = getAliasTypeSetContainedTypes(lhsTypes);
c10::optional<AliasTypeSet> rhsContainedTypes = getAliasTypeSetContainedTypes(rhsTypes); std::optional<AliasTypeSet> rhsContainedTypes = getAliasTypeSetContainedTypes(rhsTypes);
// Checks if one side is wildcard and the other side is a container of the same type // Checks if one side is wildcard and the other side is a container of the same type
bool lhsWildcard = lhsArg.alias_info() && lhsArg.alias_info()->isWildcardAfter() && canAliasTypeSetsAlias(lhsTypes, rhsContainedTypes); bool lhsWildcard = lhsArg.alias_info() && lhsArg.alias_info()->isWildcardAfter() && canAliasTypeSetsAlias(lhsTypes, rhsContainedTypes);
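
canAliasTypeSetsAlias only reports a possible alias when both optional sets are present and overlap. A self-contained sketch of that contract (TypeId and std::set are stand-ins for the real AliasTypeSet representation):

```
#include <cassert>
#include <optional>
#include <set>

using TypeId = int;
using AliasTypeSet = std::set<TypeId>;

bool canAliasTypeSetsAlias(const std::optional<AliasTypeSet>& lhs,
                           const std::optional<AliasTypeSet>& rhs) {
  if (!lhs || !rhs) {
    return false;  // an absent set can never alias anything
  }
  for (TypeId t : *lhs) {
    if (rhs->count(t) != 0) {
      return true;  // at least one shared member
    }
  }
  return false;
}

int main() {
  assert(canAliasTypeSetsAlias(AliasTypeSet{1, 2}, AliasTypeSet{2, 3}));
  assert(!canAliasTypeSetsAlias(AliasTypeSet{1}, AliasTypeSet{3}));
  assert(!canAliasTypeSetsAlias(std::nullopt, AliasTypeSet{3}));
}
```
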


@ -29,20 +29,20 @@ struct Argument {
Argument( Argument(
std::string name = "", std::string name = "",
const TypePtr& type = nullptr, const TypePtr& type = nullptr,
c10::optional<int32_t> N = c10::nullopt, std::optional<int32_t> N = c10::nullopt,
c10::optional<IValue> default_value = c10::nullopt, std::optional<IValue> default_value = c10::nullopt,
bool kwarg_only = false, bool kwarg_only = false,
c10::optional<AliasInfo> alias_info = c10::nullopt) std::optional<AliasInfo> alias_info = c10::nullopt)
: Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {} : Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
Argument( Argument(
std::string name, std::string name,
TypePtr fake_type, TypePtr fake_type,
TypePtr real_type, TypePtr real_type,
c10::optional<int32_t> N = c10::nullopt, std::optional<int32_t> N = c10::nullopt,
c10::optional<IValue> default_value = c10::nullopt, std::optional<IValue> default_value = c10::nullopt,
bool kwarg_only = false, bool kwarg_only = false,
c10::optional<AliasInfo> alias_info = c10::nullopt) std::optional<AliasInfo> alias_info = c10::nullopt)
: name_(std::move(name)), : name_(std::move(name)),
type_(fake_type ? std::move(fake_type) : TensorType::get()), type_(fake_type ? std::move(fake_type) : TensorType::get()),
real_type_(real_type ? std::move(real_type) : type_), real_type_(real_type ? std::move(real_type) : type_),
@ -94,10 +94,10 @@ struct Argument {
const TypePtr& real_type() const { const TypePtr& real_type() const {
return real_type_; return real_type_;
} }
c10::optional<int32_t> N() const { std::optional<int32_t> N() const {
return N_; return N_;
} }
const c10::optional<IValue>& default_value() const { const std::optional<IValue>& default_value() const {
return default_value_; return default_value_;
} }
bool kwarg_only() const { bool kwarg_only() const {
@ -150,7 +150,7 @@ struct Argument {
N_, N_,
default_value_, default_value_,
kwarg_only_, kwarg_only_,
alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt); alias_info_ ? std::optional<AliasInfo>(*alias_info_) : c10::nullopt);
} }
// this function checks whether this Argument is backward compatible with // this function checks whether this Argument is backward compatible with
@ -179,9 +179,9 @@ struct Argument {
// e.g. for int[3]: type = ListType::ofInts(), N = 3 // e.g. for int[3]: type = ListType::ofInts(), N = 3
// If present, this will allow scalars to be broadcast to this length to // If present, this will allow scalars to be broadcast to this length to
// become a list. // become a list.
c10::optional<int32_t> N_; std::optional<int32_t> N_;
c10::optional<IValue> default_value_; std::optional<IValue> default_value_;
// AliasInfo is huge, so let's only allocate memory for it if // AliasInfo is huge, so let's only allocate memory for it if
// necessary (which it isn't during schema parsing on startup, to // necessary (which it isn't during schema parsing on startup, to
// give a pertinent example). // give a pertinent example).
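
The optional N above is what lets a schema like int[3] accept a bare scalar. A hedged sketch of the broadcast it enables (broadcastScalar is a made-up helper, not a real API; the no-N branch is an illustrative choice):

```
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

std::vector<int64_t> broadcastScalar(int64_t value, std::optional<int32_t> N) {
  if (!N.has_value()) {
    return {value};  // no declared length: keep the single element
  }
  // Declared length present: repeat the scalar to fill the list.
  return std::vector<int64_t>(static_cast<size_t>(*N), value);
}

int main() {
  assert(broadcastScalar(7, 3) == (std::vector<int64_t>{7, 7, 7}));  // int[3]
  assert(broadcastScalar(7, std::nullopt).size() == 1);
}
```
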
@ -322,7 +322,7 @@ struct TORCH_API FunctionSchema {
// alias information should we infer? // alias information should we infer?
// NB: due to alias analysis kind merging, this may be nullopt. Eventually // NB: due to alias analysis kind merging, this may be nullopt. Eventually
// this should always be set no matter what // this should always be set no matter what
c10::optional<AliasAnalysisKind> alias_kind_; std::optional<AliasAnalysisKind> alias_kind_;
template <typename T> template <typename T>
void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const; void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const;
@ -395,7 +395,7 @@ struct TORCH_API FunctionSchema {
return aliasInfo && aliasInfo->isWrite(); return aliasInfo && aliasInfo->isWrite();
} }
bool is_mutable(c10::string_view name) const { bool is_mutable(c10::string_view name) const {
c10::optional<int> index = argumentIndexWithName(name); std::optional<int> index = argumentIndexWithName(name);
TORCH_INTERNAL_ASSERT( TORCH_INTERNAL_ASSERT(
index != c10::nullopt, "Schema has no argument named ", name); index != c10::nullopt, "Schema has no argument named ", name);
@ -416,22 +416,22 @@ struct TORCH_API FunctionSchema {
// Returns whether the two AliasTypeSets contain any similarities // Returns whether the two AliasTypeSets contain any similarities
// ie: whether the two type sets can alias. // ie: whether the two type sets can alias.
bool canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const; bool canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;
// Recursively Finds all contained types within the AliasTypeSet. // Recursively Finds all contained types within the AliasTypeSet.
c10::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const; std::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;
// Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp. // Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
// Used to map types to a type such that all types that can alias will be mapped to the same type. // Used to map types to a type such that all types that can alias will be mapped to the same type.
// For example, calling this method on 'Optional[List[int]]' is the same as calling this method // For example, calling this method on 'Optional[List[int]]' is the same as calling this method
// on 'List[int]'. // on 'List[int]'.
c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const; std::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
// Returns either arguments() or returns() depending on the SchemaArgType // Returns either arguments() or returns() depending on the SchemaArgType
// output => returns(), input => arguments() // output => returns(), input => arguments()
const std::vector<Argument>& getCorrectList(SchemaArgType type) const; const std::vector<Argument>& getCorrectList(SchemaArgType type) const;
c10::optional<int> argumentIndexWithName(c10::string_view name) const { std::optional<int> argumentIndexWithName(c10::string_view name) const {
for (const auto i : c10::irange(arguments().size())) { for (const auto i : c10::irange(arguments().size())) {
if(name == arguments()[i].name()) if(name == arguments()[i].name())
return i; return i;
@ -470,8 +470,8 @@ struct TORCH_API FunctionSchema {
std::string formatTypeMismatchMsg( std::string formatTypeMismatchMsg(
const Argument& expected, const Argument& expected,
const std::string& actual_type, const std::string& actual_type,
c10::optional<size_t> position = c10::nullopt, std::optional<size_t> position = c10::nullopt,
c10::optional<std::string> value = c10::nullopt) const; std::optional<std::string> value = c10::nullopt) const;
FunctionSchema cloneWithRemappedTypes( FunctionSchema cloneWithRemappedTypes(
const std::function<TypePtr(TypePtr)> type_map) const; const std::function<TypePtr(TypePtr)> type_map) const;
@ -514,7 +514,7 @@ struct TORCH_API FunctionSchema {
alias_kind_ = v; alias_kind_ = v;
} }
c10::optional<c10::string_view> getNamespace() const { std::optional<c10::string_view> getNamespace() const {
return name_.getNamespace(); return name_.getNamespace();
} }


@ -162,8 +162,8 @@ inline bool Argument::isForwardCompatibleWith(
inline std::string FunctionSchema::formatTypeMismatchMsg( inline std::string FunctionSchema::formatTypeMismatchMsg(
const Argument& expected, const Argument& expected,
const std::string& actual_type, const std::string& actual_type,
c10::optional<size_t> position, std::optional<size_t> position,
c10::optional<std::string> value) const { std::optional<std::string> value) const {
std::string position_str; std::string position_str;
if (position) { if (position) {
position_str = c10::str("Position: ", *position, "\n"); position_str = c10::str("Position: ", *position, "\n");


@ -471,7 +471,7 @@ bool IValue::isOptionalTensorList() const {
return false; return false;
} }
const auto& ty = static_cast<detail::ListImpl*>(payload.u.as_intrusive_ptr)->elementType; const auto& ty = static_cast<detail::ListImpl*>(payload.u.as_intrusive_ptr)->elementType;
const auto& expected_ty = c10::getTypePtr<c10::optional<at::Tensor>>(); const auto& expected_ty = c10::getTypePtr<std::optional<at::Tensor>>();
return expected_ty == ty; return expected_ty == ty;
} }
@ -886,14 +886,14 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::create(
StrongTypePtr(nullptr, std::move(classType)), numSlots); StrongTypePtr(nullptr, std::move(classType)), numSlots);
} }
IValue IValue::deepcopy(c10::optional<at::Device> device) const { IValue IValue::deepcopy(std::optional<at::Device> device) const {
IValue::HashAliasedIValueMap memo; IValue::HashAliasedIValueMap memo;
return deepcopy(memo, device); return deepcopy(memo, device);
} }
IValue IValue::deepcopy( IValue IValue::deepcopy(
IValue::HashAliasedIValueMap& memo, IValue::HashAliasedIValueMap& memo,
c10::optional<at::Device> device) const { std::optional<at::Device> device) const {
if (memo.count(*this)) { if (memo.count(*this)) {
return memo.at(*this); return memo.at(*this);
} }
@ -1027,14 +1027,14 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::copy_to_weak_compilation_ref(
} }
c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy( c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(
c10::optional<at::Device> device) const { std::optional<at::Device> device) const {
IValue::HashAliasedIValueMap memo; IValue::HashAliasedIValueMap memo;
return deepcopy(memo, device); return deepcopy(memo, device);
} }
c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy( c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(
IValue::HashAliasedIValueMap& memo, IValue::HashAliasedIValueMap& memo,
c10::optional<at::Device> device) const { std::optional<at::Device> device) const {
auto cu = type_.cu_; auto cu = type_.cu_;
auto object = ivalue::Object::create(WeakOrStrongTypePtr(type_.cu_, type_.type_), type()->numAttributes()); auto object = ivalue::Object::create(WeakOrStrongTypePtr(type_.cu_, type_.type_), type()->numAttributes());
for (const auto i : c10::irange(slots_.size())) { for (const auto i : c10::irange(slots_.size())) {
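
Both deepcopy overloads above thread a memo table through the copy so that aliasing survives the clone. A minimal sketch of that memoization with a toy Node type (the real code also threads a std::optional<at::Device> through, omitted here):

```
#include <cassert>
#include <memory>
#include <unordered_map>
#include <vector>

struct Node {
  int value = 0;
  std::vector<std::shared_ptr<Node>> children;
};

using Memo = std::unordered_map<const Node*, std::shared_ptr<Node>>;

std::shared_ptr<Node> deepcopy(const std::shared_ptr<Node>& n, Memo& memo) {
  if (auto it = memo.find(n.get()); it != memo.end()) {
    return it->second;  // already copied: reuse to preserve aliasing
  }
  auto copy = std::make_shared<Node>();
  memo[n.get()] = copy;  // record before recursing into shared children
  copy->value = n->value;
  for (const auto& child : n->children) {
    copy->children.push_back(deepcopy(child, memo));
  }
  return copy;
}

int main() {
  auto shared = std::make_shared<Node>(Node{42, {}});
  auto root = std::make_shared<Node>(Node{0, {shared, shared}});

  Memo memo;
  auto copy = deepcopy(root, memo);
  assert(copy->children[0] == copy->children[1]);  // aliasing preserved
  assert(copy->children[0] != root->children[0]);  // but it is a new object
}
```
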


@ -86,20 +86,20 @@ struct StreamData3Holder : c10::intrusive_ptr_target {
} // namespace ivalue } // namespace ivalue
// This is an owning wrapper for a c10::optional<std::vector<T>> // This is an owning wrapper for a std::optional<std::vector<T>>
// that can be implicitly converted to a (non-owning) optional<ArrayRef<T>>. // that can be implicitly converted to a (non-owning) optional<ArrayRef<T>>.
// Its purpose is to be used in generated code to keep the vector alive // Its purpose is to be used in generated code to keep the vector alive
// either until the end of a statement (as a temporary), or as a saved arg // either until the end of a statement (as a temporary), or as a saved arg
// in autograd. // in autograd.
template <typename T> template <typename T>
struct OptionalArray { struct OptionalArray {
c10::optional<std::vector<T>> list; std::optional<std::vector<T>> list;
OptionalArray() = default; OptionalArray() = default;
OptionalArray(std::vector<T> val) : list(std::move(val)) {} OptionalArray(std::vector<T> val) : list(std::move(val)) {}
// Used when saving an argument for the backwards pass. // Used when saving an argument for the backwards pass.
OptionalArray& operator=(c10::optional<ArrayRef<T>> ref) { OptionalArray& operator=(std::optional<ArrayRef<T>> ref) {
if (ref) { if (ref) {
list = std::vector<T>(ref->begin(), ref->end()); list = std::vector<T>(ref->begin(), ref->end());
} else { } else {
@ -118,7 +118,7 @@ struct OptionalArray {
return *this; return *this;
} }
operator c10::optional<c10::ArrayRef<T>>() { operator std::optional<c10::ArrayRef<T>>() {
if (!list) { if (!list) {
return nullopt; return nullopt;
} }
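
A compressed sketch of the OptionalArray idea described in the comment above: own the optional vector, hand out a non-owning optional view on request (ArrayView here is a stand-in for c10::ArrayRef, not the real type):

```
#include <cassert>
#include <cstddef>
#include <optional>
#include <vector>

template <typename T>
struct ArrayView {
  const T* data = nullptr;
  std::size_t size = 0;
};

template <typename T>
struct OptionalArray {
  std::optional<std::vector<T>> list;

  OptionalArray() = default;
  OptionalArray(std::vector<T> val) : list(std::move(val)) {}

  // Non-owning view; only valid while this OptionalArray is alive.
  operator std::optional<ArrayView<T>>() const {
    if (!list) {
      return std::nullopt;
    }
    return ArrayView<T>{list->data(), list->size()};
  }
};

int main() {
  OptionalArray<int> owned(std::vector<int>{1, 2, 3});
  std::optional<ArrayView<int>> view = owned;  // implicit conversion
  assert(view.has_value() && view->size == 3);

  OptionalArray<int> empty;
  std::optional<ArrayView<int>> none = empty;
  assert(!none.has_value());
}
```
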
@ -697,7 +697,7 @@ struct TORCH_API IValue final {
c10::intrusive_ptr<ivalue::ConstantString> toString() &&; c10::intrusive_ptr<ivalue::ConstantString> toString() &&;
c10::intrusive_ptr<ivalue::ConstantString> toString() const&; c10::intrusive_ptr<ivalue::ConstantString> toString() const&;
const std::string& toStringRef() const; const std::string& toStringRef() const;
c10::optional<std::reference_wrapper<const std::string>> toOptionalStringRef() std::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
const; const;
c10::string_view toStringView() const; c10::string_view toStringView() const;
@ -726,9 +726,9 @@ struct TORCH_API IValue final {
// OptionalTensorList // OptionalTensorList
bool isOptionalTensorList() const; bool isOptionalTensorList() const;
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() &&; c10::List<std::optional<at::Tensor>> toOptionalTensorList() &&;
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() const&; c10::List<std::optional<at::Tensor>> toOptionalTensorList() const&;
std::vector<c10::optional<at::Tensor>> toOptionalTensorVector() const; std::vector<std::optional<at::Tensor>> toOptionalTensorVector() const;
// GenericList // GenericList
IValue(c10::List<IValue> v); IValue(c10::List<IValue> v);
@ -817,7 +817,7 @@ struct TORCH_API IValue final {
IValue(std::unordered_map<Key, Value> v); IValue(std::unordered_map<Key, Value> v);
template <class T, enable_if_ivalue_constructible<T> = nullptr> template <class T, enable_if_ivalue_constructible<T> = nullptr>
IValue(c10::optional<T> v); IValue(std::optional<T> v);
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr> template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
IValue(c10::OptionalArrayRef<T> v); IValue(c10::OptionalArrayRef<T> v);
IValue(c10::nullopt_t); IValue(c10::nullopt_t);
@ -1128,10 +1128,10 @@ struct TORCH_API IValue final {
// TODO: There are several places that recurse over IValue. This is fragile. // TODO: There are several places that recurse over IValue. This is fragile.
// This visitor should be used to recurse over ivalues. // This visitor should be used to recurse over ivalues.
void visit(const std::function<bool(const IValue&)>& visitor) const; void visit(const std::function<bool(const IValue&)>& visitor) const;
IValue deepcopy(c10::optional<at::Device> device = c10::nullopt) const; IValue deepcopy(std::optional<at::Device> device = c10::nullopt) const;
IValue deepcopy( IValue deepcopy(
HashAliasedIValueMap& memo, HashAliasedIValueMap& memo,
c10::optional<at::Device> device = c10::nullopt) const; std::optional<at::Device> device = c10::nullopt) const;
private: private:
static c10::intrusive_ptr_target* null_to_undefined_tensor( static c10::intrusive_ptr_target* null_to_undefined_tensor(
@ -1530,8 +1530,8 @@ struct WeakOrStrongCompilationUnit {
return holdingStrongRef() && *strong_ptr_ == nullptr; return holdingStrongRef() && *strong_ptr_ == nullptr;
} }
c10::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_; std::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
c10::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_; std::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_;
}; };
// An Object will hold a non-owning Compilation Unit reference if it is a // An Object will hold a non-owning Compilation Unit reference if it is a


@ -909,7 +909,7 @@ struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target {
using WeakStorage = c10::weak_intrusive_ptr<c10::StorageImpl>; using WeakStorage = c10::weak_intrusive_ptr<c10::StorageImpl>;
void markCompleted( void markCompleted(
IValue value, IValue value,
c10::optional<std::vector<WeakStorage>> storages = c10::nullopt) { std::optional<std::vector<WeakStorage>> storages = c10::nullopt) {
// Start by performing all steps that can throw, before setting any field. // Start by performing all steps that can throw, before setting any field.
// Do this before even acquiring the mutex, because extractStorages might // Do this before even acquiring the mutex, because extractStorages might
// acquire the GIL, which could lead to a lock inversion with our mutex. // acquire the GIL, which could lead to a lock inversion with our mutex.
@ -1586,11 +1586,11 @@ struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target {
c10::intrusive_ptr<Object> copy() const; c10::intrusive_ptr<Object> copy() const;
c10::intrusive_ptr<Object> deepcopy( c10::intrusive_ptr<Object> deepcopy(
c10::optional<at::Device> device = c10::nullopt) const; std::optional<at::Device> device = c10::nullopt) const;
c10::intrusive_ptr<Object> deepcopy( c10::intrusive_ptr<Object> deepcopy(
IValue::HashAliasedIValueMap& memo, IValue::HashAliasedIValueMap& memo,
c10::optional<at::Device> device = c10::nullopt) const; std::optional<at::Device> device = c10::nullopt) const;
bool is_weak_compilation_ref() const { bool is_weak_compilation_ref() const {
return !type_.holds_strong_ref(); return !type_.holds_strong_ref();
@ -1613,7 +1613,7 @@ struct ivalue::PyObjectHolder : c10::intrusive_ptr_target {
public: public:
virtual PyObject* getPyObject() = 0; virtual PyObject* getPyObject() = 0;
virtual c10::InferredType tryToInferType() = 0; virtual c10::InferredType tryToInferType() = 0;
virtual IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt) = 0; virtual IValue toIValue(const TypePtr& type, std::optional<int32_t> N = c10::nullopt) = 0;
virtual std::string toStr() = 0; virtual std::string toStr() = 0;
virtual std::vector<at::Tensor> extractTensors() = 0; virtual std::vector<at::Tensor> extractTensors() = 0;
@ -1909,7 +1909,7 @@ std::unordered_map<K, V> generic_to(
} }
template <typename T> template <typename T>
c10::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) { std::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) {
if (ivalue.isNone()) { if (ivalue.isNone()) {
return c10::nullopt; return c10::nullopt;
} }
@ -1946,11 +1946,11 @@ inline T IValue::to() && {
} }
template <> template <>
inline c10::optional<c10::string_view> IValue::to() && { inline std::optional<c10::string_view> IValue::to() && {
// In the default implementation, the IValue is destroyed with std::move. // In the default implementation, the IValue is destroyed with std::move.
// But if the unboxed type is optional<string_view> we cannot destroy // But if the unboxed type is optional<string_view> we cannot destroy
// the IValue. // the IValue.
return generic_to(*this, _fake_type<c10::optional<c10::string_view>>{}); return generic_to(*this, _fake_type<std::optional<c10::string_view>>{});
} }
template <typename T> template <typename T>
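
The specialization above refuses to consume the IValue because a string_view does not own its bytes. A plain-C++ illustration of why the rvalue path would dangle (Boxed is a toy type, not IValue):

```
#include <cassert>
#include <optional>
#include <string>
#include <string_view>

struct Boxed {
  std::string payload;

  // Safe: the view borrows from *this, which the caller keeps alive.
  std::optional<std::string_view> view() const& {
    return std::string_view(payload);
  }

  // Deleted on rvalues: a view into a temporary would dangle as soon as
  // the temporary is destroyed at the end of the full expression.
  std::optional<std::string_view> view() && = delete;
};

int main() {
  Boxed b{"hello"};
  auto v = b.view();  // fine: b outlives the view
  assert(v.has_value() && *v == "hello");
  // auto bad = Boxed{"oops"}.view();  // would not compile: rvalue overload deleted
}
```
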
@ -2046,20 +2046,20 @@ inline std::vector<at::Tensor> IValue::toTensorVector() const {
return createVectorFromList<at::Tensor>( return createVectorFromList<at::Tensor>(
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr)); static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
} }
inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() && { inline c10::List<std::optional<at::Tensor>> IValue::toOptionalTensorList() && {
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
return c10::List<c10::optional<at::Tensor>>(moveToIntrusivePtr<c10::detail::ListImpl>()); return c10::List<std::optional<at::Tensor>>(moveToIntrusivePtr<c10::detail::ListImpl>());
} }
inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() const& { inline c10::List<std::optional<at::Tensor>> IValue::toOptionalTensorList() const& {
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
return c10::List<c10::optional<at::Tensor>>(toIntrusivePtr<c10::detail::ListImpl>()); return c10::List<std::optional<at::Tensor>>(toIntrusivePtr<c10::detail::ListImpl>());
} }
inline std::vector<c10::optional<at::Tensor>> IValue::toOptionalTensorVector() const { inline std::vector<std::optional<at::Tensor>> IValue::toOptionalTensorVector() const {
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
TORCH_INTERNAL_ASSERT_DEBUG_ONLY( TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
"called toOptionalTensorVector on null intrusive_ptr IValue"); "called toOptionalTensorVector on null intrusive_ptr IValue");
return createVectorFromList<c10::optional<at::Tensor>>( return createVectorFromList<std::optional<at::Tensor>>(
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr)); static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
} }
inline c10::List<IValue> IValue::toList() && { inline c10::List<IValue> IValue::toList() && {
@ -2274,7 +2274,7 @@ inline IValue::IValue(std::unordered_map<Key, Value> v)
} }
template <class T, IValue::enable_if_ivalue_constructible<T>> template <class T, IValue::enable_if_ivalue_constructible<T>>
inline IValue::IValue(c10::optional<T> v) : IValue() { inline IValue::IValue(std::optional<T> v) : IValue() {
if (v.has_value()) { if (v.has_value()) {
*this = IValue(std::move(*v)); *this = IValue(std::move(*v));
} }
@ -2360,7 +2360,7 @@ inline const std::string& IValue::toStringRef() const {
payload.u.as_intrusive_ptr) payload.u.as_intrusive_ptr)
->string(); ->string();
} }
inline c10::optional<std::reference_wrapper<const std::string>> IValue:: inline std::optional<std::reference_wrapper<const std::string>> IValue::
toOptionalStringRef() const { toOptionalStringRef() const {
if (isNone()) { if (isNone()) {
return c10::nullopt; return c10::nullopt;


@ -32,7 +32,7 @@ class Dict;
struct IValue; struct IValue;
struct FunctionSchema; struct FunctionSchema;
struct NamedType; struct NamedType;
using OptNameList = c10::optional<std::vector<std::string>>; using OptNameList = std::optional<std::vector<std::string>>;
void standardizeVectorForUnion(std::vector<TypePtr>& reference, std::vector<TypePtr>* to_fill); void standardizeVectorForUnion(std::vector<TypePtr>& reference, std::vector<TypePtr>* to_fill);
void standardizeVectorForUnion(std::vector<TypePtr>* to_flatten); void standardizeVectorForUnion(std::vector<TypePtr>* to_flatten);
@ -164,9 +164,9 @@ struct TORCH_API UnionType : public SharedType {
return has_free_variables_; return has_free_variables_;
} }
c10::optional<TypePtr> toOptional() const; std::optional<TypePtr> toOptional() const;
c10::optional<TypePtr> subtractTypeSet(std::vector<TypePtr>& to_subtract) const; std::optional<TypePtr> subtractTypeSet(std::vector<TypePtr>& to_subtract) const;
protected: protected:
explicit UnionType(std::vector<TypePtr> types, TypeKind kind=TypeKind::UnionType); explicit UnionType(std::vector<TypePtr> types, TypeKind kind=TypeKind::UnionType);
@ -247,13 +247,13 @@ struct TORCH_API OptionalType : public UnionType {
}; };
template <typename T> template <typename T>
inline c10::optional<T> merge_primitive( inline std::optional<T> merge_primitive(
const c10::optional<T>& a, const std::optional<T>& a,
const c10::optional<T>& b) { const std::optional<T>& b) {
if (a.has_value() && b.has_value() && a.value() == b.value()) { if (a.has_value() && b.has_value() && a.value() == b.value()) {
return a; return a;
} }
return c10::optional<T>{}; return std::optional<T>{};
} }
// If we see `a + b + c` and know that a, b, and c are the same size and have // If we see `a + b + c` and know that a, b, and c are the same size and have
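
The merge rule above, pulled out with a small usage check: two profiled values merge to a concrete value only when both are known and equal; a conflict or a missing value collapses to an empty optional. A standalone copy for illustration:

```
#include <cassert>
#include <optional>

template <typename T>
std::optional<T> merge_primitive(const std::optional<T>& a,
                                 const std::optional<T>& b) {
  if (a.has_value() && b.has_value() && a.value() == b.value()) {
    return a;
  }
  return std::optional<T>{};
}

int main() {
  assert(merge_primitive<int>(4, 4) == std::optional<int>(4));  // agreement
  assert(!merge_primitive<int>(4, 5).has_value());              // conflict
  assert(!merge_primitive<int>(4, std::nullopt).has_value());   // unknown
}
```
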
@ -274,9 +274,9 @@ inline c10::optional<T> merge_primitive(
struct TORCH_API Stride { struct TORCH_API Stride {
Stride() = default; Stride() = default;
Stride( Stride(
const c10::optional<size_t>& stride_index, const std::optional<size_t>& stride_index,
c10::optional<bool> contiguous, std::optional<bool> contiguous,
const c10::optional<size_t>& stride) const std::optional<size_t>& stride)
: stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {} : stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {}
bool operator==(const Stride& b) const { bool operator==(const Stride& b) const {
@ -288,17 +288,17 @@ struct TORCH_API Stride {
return stride_index_ && contiguous_ && stride_; return stride_index_ && contiguous_ && stride_;
} }
c10::optional<size_t> stride_index_; std::optional<size_t> stride_index_;
c10::optional<bool> contiguous_; std::optional<bool> contiguous_;
c10::optional<size_t> stride_; std::optional<size_t> stride_;
}; };
template <> template <>
inline c10::optional<Stride> merge_primitive( inline std::optional<Stride> merge_primitive(
const c10::optional<Stride>& a, const std::optional<Stride>& a,
const c10::optional<Stride>& b) { const std::optional<Stride>& b) {
c10::optional<Stride> left = a; std::optional<Stride> left = a;
c10::optional<Stride> right = b; std::optional<Stride> right = b;
if (!left.has_value()) { if (!left.has_value()) {
left = {Stride()}; left = {Stride()};
} }
@ -314,7 +314,7 @@ inline c10::optional<Stride> merge_primitive(
// normalize // normalize
if (!r.stride_index_.has_value() && !r.contiguous_.has_value() && if (!r.stride_index_.has_value() && !r.contiguous_.has_value() &&
!r.stride_.has_value()) { !r.stride_.has_value()) {
return c10::optional<Stride>{}; return std::optional<Stride>{};
} }
return r; return r;
@ -375,7 +375,7 @@ struct TORCH_API SymbolicShape {
SymbolicShape() : dims_(c10::nullopt) {} SymbolicShape() : dims_(c10::nullopt) {}
// Known rank but unknown dimensions. // Known rank but unknown dimensions.
SymbolicShape(c10::optional<size_t> rank) : dims_(c10::nullopt) { SymbolicShape(std::optional<size_t> rank) : dims_(c10::nullopt) {
if(!rank) { if(!rank) {
return; return;
} }
@ -389,10 +389,10 @@ struct TORCH_API SymbolicShape {
} }
// Mix of known and unknown ranks // Mix of known and unknown ranks
SymbolicShape(const std::vector<c10::optional<int64_t>>& dims) { SymbolicShape(const std::vector<std::optional<int64_t>>& dims) {
std::vector<ShapeSymbol> shape_symbols; std::vector<ShapeSymbol> shape_symbols;
shape_symbols.reserve(dims.size()); shape_symbols.reserve(dims.size());
for(c10::optional<int64_t> dim: dims) { for(std::optional<int64_t> dim: dims) {
if(!dim) { if(!dim) {
shape_symbols.push_back(ShapeSymbol::newSymbol()); shape_symbols.push_back(ShapeSymbol::newSymbol());
} else { } else {
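
A sketch of what the constructor above does with a mixed list of dimensions: known entries stay as static sizes, each disengaged optional gets a fresh symbol. The negative-number encoding below is only an assumption for illustration; the real ShapeSymbol type is not reproduced here.

```
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

std::vector<int64_t> toShapeSymbols(
    const std::vector<std::optional<int64_t>>& dims) {
  static int64_t next_symbol = -1;  // assumed encoding: fresh symbols are negative
  std::vector<int64_t> symbols;
  symbols.reserve(dims.size());
  for (const std::optional<int64_t>& dim : dims) {
    symbols.push_back(dim.has_value() ? *dim : next_symbol--);
  }
  return symbols;
}

int main() {
  auto s = toShapeSymbols({3, std::nullopt, 5});
  assert(s[0] == 3 && s[2] == 5);
  assert(s[1] < 0);  // the unknown middle dimension got a symbol
}
```
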
@ -430,18 +430,18 @@ struct TORCH_API SymbolicShape {
} }
// Returns rank or nullopt in case of unranked shape. // Returns rank or nullopt in case of unranked shape.
c10::optional<size_t> rank() const { std::optional<size_t> rank() const {
if(!dims_) { if(!dims_) {
return c10::nullopt; return c10::nullopt;
} }
return dims_->size(); return dims_->size();
} }
c10::optional<std::vector<ShapeSymbol>> sizes() const { std::optional<std::vector<ShapeSymbol>> sizes() const {
return dims_; return dims_;
} }
c10::optional<std::vector<bool>> symbolicDims() const { std::optional<std::vector<bool>> symbolicDims() const {
if (!dims_) { if (!dims_) {
return c10::nullopt; return c10::nullopt;
} }
@ -482,7 +482,7 @@ struct TORCH_API SymbolicShape {
} }
private: private:
c10::optional<std::vector<ShapeSymbol>> dims_; std::optional<std::vector<ShapeSymbol>> dims_;
}; };
namespace detail { namespace detail {
@ -498,14 +498,14 @@ inline bool isComplete(const T& /*t*/) {
template <typename T> template <typename T>
struct VaryingShape { struct VaryingShape {
using ListOfOptionalElements = std::vector<c10::optional<T>>; using ListOfOptionalElements = std::vector<std::optional<T>>;
VaryingShape(const std::vector<T>& vec) VaryingShape(const std::vector<T>& vec)
: VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {}
VaryingShape(c10::ArrayRef<T> vec) VaryingShape(c10::ArrayRef<T> vec)
: VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {}
VaryingShape(c10::optional<size_t> size = c10::nullopt) : dims_(c10::nullopt) { VaryingShape(std::optional<size_t> size = c10::nullopt) : dims_(c10::nullopt) {
if (size) { if (size) {
dims_ = ListOfOptionalElements(*size); dims_ = ListOfOptionalElements(*size);
} }
@ -513,20 +513,20 @@ struct VaryingShape {
VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {} VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {}
VaryingShape(size_t size) : VaryingShape(c10::optional<size_t>(size)) {} VaryingShape(size_t size) : VaryingShape(std::optional<size_t>(size)) {}
bool operator==(const VaryingShape& other) const { bool operator==(const VaryingShape& other) const {
return dims_ == other.dims_; return dims_ == other.dims_;
} }
const c10::optional<T> &operator[](size_t i) const { const std::optional<T> &operator[](size_t i) const {
if (!dims_) { if (!dims_) {
throw std::runtime_error("Rank isn't fixed"); throw std::runtime_error("Rank isn't fixed");
} }
return (*dims_).at(i); return (*dims_).at(i);
} }
c10::optional<size_t> size() const { std::optional<size_t> size() const {
if (!dims_) { if (!dims_) {
return c10::nullopt; return c10::nullopt;
} }
@ -534,13 +534,13 @@ struct VaryingShape {
return dims.size(); return dims.size();
} }
const c10::optional<ListOfOptionalElements>& sizes() const { const std::optional<ListOfOptionalElements>& sizes() const {
return dims_; return dims_;
} }
TORCH_API VaryingShape merge(const VaryingShape& other) const; TORCH_API VaryingShape merge(const VaryingShape& other) const;
c10::optional<std::vector<T>> concrete_sizes() const { std::optional<std::vector<T>> concrete_sizes() const {
if (!dims_) { if (!dims_) {
return c10::nullopt; return c10::nullopt;
} }
@ -568,7 +568,7 @@ struct VaryingShape {
} }
private: private:
c10::optional<ListOfOptionalElements> dims_; std::optional<ListOfOptionalElements> dims_;
}; };
struct TensorType; struct TensorType;
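
concrete_sizes() in the VaryingShape hunk above collapses a vector of optional dimensions into an optional vector: any unknown entry, or an unknown rank, makes the whole result unknown. A dependency-free sketch of that collapse:

```
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

template <typename T>
std::optional<std::vector<T>> concreteSizes(
    const std::optional<std::vector<std::optional<T>>>& dims) {
  if (!dims) {
    return std::nullopt;  // rank itself is unknown
  }
  std::vector<T> sizes;
  sizes.reserve(dims->size());
  for (const auto& d : *dims) {
    if (!d) {
      return std::nullopt;  // at least one dimension is unknown
    }
    sizes.push_back(*d);
  }
  return sizes;
}

int main() {
  using Dims = std::vector<std::optional<int64_t>>;
  assert(concreteSizes<int64_t>(Dims{2, 3}).value() == (std::vector<int64_t>{2, 3}));
  assert(!concreteSizes<int64_t>(Dims{2, std::nullopt}).has_value());
  assert(!concreteSizes<int64_t>(std::optional<Dims>{}).has_value());
}
```
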
@ -581,27 +581,27 @@ struct TORCH_API TensorType : public SharedType {
// used by TensorType::create(size_t dim) which in turn used by // used by TensorType::create(size_t dim) which in turn used by
// shape_analysis.cpp // shape_analysis.cpp
static TensorTypePtr create( static TensorTypePtr create(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
const VaryingShape<int64_t>& sizes, const VaryingShape<int64_t>& sizes,
const VaryingShape<int64_t>& strides, const VaryingShape<int64_t>& strides,
c10::optional<bool> requires_grad, std::optional<bool> requires_grad,
c10::optional<bool> undefined = false, std::optional<bool> undefined = false,
bool tensor_contiguity = false); bool tensor_contiguity = false);
static TensorTypePtr create( static TensorTypePtr create(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
const SymbolicShape& sizes, const SymbolicShape& sizes,
const VaryingShape<Stride>& stride_, const VaryingShape<Stride>& stride_,
c10::optional<bool> requires_grad, std::optional<bool> requires_grad,
c10::optional<bool> undefined = false); std::optional<bool> undefined = false);
static TensorTypePtr create( static TensorTypePtr create(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
c10::optional<size_t> dim, std::optional<size_t> dim,
c10::optional<bool> requires_grad); std::optional<bool> requires_grad);
// overloaded create variadic template argument as it could not distinguish // overloaded create variadic template argument as it could not distinguish
// initializer list // initializer list
@ -613,7 +613,7 @@ struct TORCH_API TensorType : public SharedType {
static TypePtr fromNumberType(const Type& typ); static TypePtr fromNumberType(const Type& typ);
static TypePtr fromBoolType(); static TypePtr fromBoolType();
c10::optional<size_t> dim() const { std::optional<size_t> dim() const {
return sizes().size(); return sizes().size();
} }
@ -625,13 +625,13 @@ struct TORCH_API TensorType : public SharedType {
return strides_; return strides_;
} }
c10::optional<at::Device> device() const { std::optional<at::Device> device() const {
return device_; return device_;
} }
c10::optional<at::ScalarType> scalarType() const { std::optional<at::ScalarType> scalarType() const {
return scalar_type_; return scalar_type_;
} }
c10::optional<bool> requiresGrad() const { std::optional<bool> requiresGrad() const {
return requires_grad_; return requires_grad_;
} }
bool requires_grad() const override { bool requires_grad() const override {
@ -651,32 +651,32 @@ struct TORCH_API TensorType : public SharedType {
} }
} }
c10::optional<size_t> numel() const { std::optional<size_t> numel() const {
size_t prod = 1; size_t prod = 1;
const auto& shape = sizes(); const auto& shape = sizes();
for (size_t i = 0; i < shape.size(); i++) { for (size_t i = 0; i < shape.size(); i++) {
if (!shape[i]) { if (!shape[i]) {
return c10::optional<size_t>{}; return std::optional<size_t>{};
} }
prod *= shape[i].value(); prod *= shape[i].value();
} }
return prod; return prod;
} }
TensorTypePtr withRequiresGrad(c10::optional<bool> s) { TensorTypePtr withRequiresGrad(std::optional<bool> s) {
auto copy = clone(); auto copy = clone();
copy->requires_grad_ = s; copy->requires_grad_ = s;
return copy; return copy;
} }
TensorTypePtr withScalarType(c10::optional<ScalarType> st) { TensorTypePtr withScalarType(std::optional<ScalarType> st) {
auto copy = clone(); auto copy = clone();
copy->scalar_type_ = st; copy->scalar_type_ = st;
return copy; return copy;
} }
TensorTypePtr withDim(c10::optional<size_t> d) { TensorTypePtr withDim(std::optional<size_t> d) {
auto copy = clone(); auto copy = clone();
// withDim is only used by the legacy executor // withDim is only used by the legacy executor
// that only cares about the rank, so create dummy symbols)) : // that only cares about the rank, so create dummy symbols)) :
@ -712,7 +712,7 @@ struct TORCH_API TensorType : public SharedType {
sizes, contiguousStridesOf(sizes)); sizes, contiguousStridesOf(sizes));
} }
TensorTypePtr withDevice(const c10::optional<at::Device> device) const { TensorTypePtr withDevice(const std::optional<at::Device> device) const {
auto copy = clone(); auto copy = clone();
copy->device_ = device; copy->device_ = device;
return copy; return copy;
@ -784,7 +784,7 @@ struct TORCH_API TensorType : public SharedType {
return r; return r;
} }
c10::optional<bool> undefined() const { return undefined_; } std::optional<bool> undefined() const { return undefined_; }
static const TensorTypePtr& get(); static const TensorTypePtr& get();
@ -824,12 +824,12 @@ struct TORCH_API TensorType : public SharedType {
private: private:
TensorType( TensorType(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
SymbolicShape sizes, SymbolicShape sizes,
VaryingShape<Stride> strides, VaryingShape<Stride> strides,
c10::optional<bool> requires_grad, std::optional<bool> requires_grad,
c10::optional<bool> undefined = false); std::optional<bool> undefined = false);
TensorTypePtr clone() const { TensorTypePtr clone() const {
return TensorTypePtr(new TensorType( return TensorTypePtr(new TensorType(
@ -841,11 +841,11 @@ struct TORCH_API TensorType : public SharedType {
at::IntArrayRef strides, at::IntArrayRef strides,
bool tensor_contiguity = false); bool tensor_contiguity = false);
c10::optional<at::ScalarType> scalar_type_; std::optional<at::ScalarType> scalar_type_;
c10::optional<at::Device> device_; std::optional<at::Device> device_;
SymbolicShape sizes_; SymbolicShape sizes_;
VaryingShape<Stride> strides_; VaryingShape<Stride> strides_;
c10::optional<bool> requires_grad_; std::optional<bool> requires_grad_;
// we exploit the fact certain tensors must be zero in the autograd to // we exploit the fact certain tensors must be zero in the autograd to
// optimize gradient computation. Such zero tensors are currently implemented // optimize gradient computation. Such zero tensors are currently implemented
// with `UndefinedTensorImpl.` They can be handled only by special operators // with `UndefinedTensorImpl.` They can be handled only by special operators
@ -857,7 +857,7 @@ struct TORCH_API TensorType : public SharedType {
// undefined_ may become `c10::nullopt` if the tensor was observed to be both // undefined_ may become `c10::nullopt` if the tensor was observed to be both
// defined and undefined. However, no tensor type starts out with // defined and undefined. However, no tensor type starts out with
// `undefined_` set to `c10::nullopt` // `undefined_` set to `c10::nullopt`
c10::optional<bool> undefined_; std::optional<bool> undefined_;
// Represents whether or not this type was inferred. // Represents whether or not this type was inferred.
bool is_inferred_ = false; bool is_inferred_ = false;
}; };
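A standalone sketch of the tri-state bookkeeping described in the `undefined_` comments above; `merge_undefined` is a hypothetical helper, not part of TensorType:

```cpp
#include <optional>

// Once a tensor has been observed both defined and undefined, the flag can no
// longer be a single bool, which std::optional<bool> models as nullopt.
std::optional<bool> merge_undefined(std::optional<bool> a, std::optional<bool> b) {
  if (a.has_value() && b.has_value() && *a == *b) {
    return a;           // consistent observations keep their value
  }
  return std::nullopt;  // conflicting or unknown observations
}
```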
@ -1144,16 +1144,16 @@ using NameList = std::vector<std::string>;
// This type represents a Tuple // This type represents a Tuple
struct TORCH_API TupleType : public NamedType { struct TORCH_API TupleType : public NamedType {
static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name, static TupleTypePtr createNamed(const std::optional<c10::QualifiedName>& name,
const std::vector<std::string>& field_names, const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types, const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults); std::vector<IValue>& field_defaults);
static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name, static TupleTypePtr createNamed(const std::optional<c10::QualifiedName>& name,
const std::vector<std::string>& field_names, const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types); const std::vector<TypePtr>& field_types);
static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name, static TupleTypePtr createNamed(const std::optional<c10::QualifiedName>& name,
const std::vector<c10::string_view>& field_names, const std::vector<c10::string_view>& field_names,
const std::vector<TypePtr>& field_types); const std::vector<TypePtr>& field_types);
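A hedged usage sketch of the `createNamed` overloads declared above; the qualified name `my.Point` and its field layout are made up for illustration:

```cpp
#include <string>
#include <vector>
#include <ATen/core/jit_type.h>
#include <ATen/core/qualified_name.h>

void named_tuple_example() {
  std::vector<std::string> fields{"x", "y"};
  std::vector<c10::TypePtr> types{c10::FloatType::get(), c10::FloatType::get()};
  // Creates a NamedTuple-like type "my.Point" with two float fields.
  auto point = c10::TupleType::createNamed(
      c10::QualifiedName("my.Point"), fields, types);
  (void)point;
}
```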
@ -1190,21 +1190,21 @@ struct TORCH_API TupleType : public NamedType {
const std::shared_ptr<FunctionSchema>& schema() const { const std::shared_ptr<FunctionSchema>& schema() const {
return schema_; return schema_;
} }
c10::optional<std::vector<c10::string_view>> names() const; std::optional<std::vector<c10::string_view>> names() const;
static const TypeKind Kind = TypeKind::TupleType; static const TypeKind Kind = TypeKind::TupleType;
private: private:
template <typename S> template <typename S>
static TupleTypePtr createWithSpec( static TupleTypePtr createWithSpec(
const c10::optional<c10::QualifiedName>& name, const std::optional<c10::QualifiedName>& name,
const std::vector<S>& field_names, const std::vector<S>& field_names,
const std::vector<TypePtr>& field_types, const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults); std::vector<IValue>& field_defaults);
TupleType( TupleType(
std::vector<TypePtr> elements_, std::vector<TypePtr> elements_,
c10::optional<c10::QualifiedName> name, std::optional<c10::QualifiedName> name,
std::shared_ptr<FunctionSchema> schema); std::shared_ptr<FunctionSchema> schema);
bool compare( bool compare(
@ -1747,7 +1747,7 @@ inline TypePtr TensorType::fromBoolType() {
return TensorType::createContiguous(at::kBool, at::kCPU, {}); return TensorType::createContiguous(at::kBool, at::kCPU, {});
} }
inline c10::optional<c10::ScalarType> tryScalarTypeFromJitType(const Type& type) { inline std::optional<c10::ScalarType> tryScalarTypeFromJitType(const Type& type) {
if (type == *FloatType::get()) { if (type == *FloatType::get()) {
return at::typeMetaToScalarType(c10::get_default_dtype()); return at::typeMetaToScalarType(c10::get_default_dtype());
} else if (type == *IntType::get()) { } else if (type == *IntType::get()) {
@ -1782,13 +1782,13 @@ inline at::ScalarType scalarTypeFromJitType(const Type& type) {
// If `type_hint` is an `InterfaceType`, then we can use that as a // If `type_hint` is an `InterfaceType`, then we can use that as a
// potential supertype for `ClassType`s in the list. Otherwise, we have // potential supertype for `ClassType`s in the list. Otherwise, we have
// no way to find and use some common interface type // no way to find and use some common interface type
TORCH_API c10::optional<TypePtr> unifyTypes( TORCH_API std::optional<TypePtr> unifyTypes(
const TypePtr& t1, const TypePtr& t1,
const TypePtr& t2, const TypePtr& t2,
bool default_to_union = false, bool default_to_union = false,
const TypePtr& type_hint = nullptr); const TypePtr& type_hint = nullptr);
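For orientation, a small example of the subtype case handled by `unifyTypes`; the header include and variable names are assumptions for the sketch:

```cpp
#include <iostream>
#include <ATen/core/jit_type.h>

void unify_example() {
  c10::TypePtr int_t = c10::IntType::get();
  c10::TypePtr opt_int = c10::OptionalType::create(int_t);
  // int is a subtype of int?, so the pair unifies to the optional type.
  if (auto unified = c10::unifyTypes(int_t, opt_int)) {
    std::cout << (*unified)->repr_str() << '\n';
  }
}
```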
TORCH_API c10::optional<TypePtr> unifyTypeList( TORCH_API std::optional<TypePtr> unifyTypeList(
at::ArrayRef<TypePtr> elements, at::ArrayRef<TypePtr> elements,
std::ostream& why_not, std::ostream& why_not,
bool default_to_union = false, bool default_to_union = false,
@ -2132,7 +2132,7 @@ struct MatchTypeReturn {
private: private:
MatchTypeReturn() MatchTypeReturn()
: reason_(c10::nullopt) {} : reason_(c10::nullopt) {}
c10::optional<std::string> reason_; // if there is no match, this contains the reason std::optional<std::string> reason_; // if there is no match, this contains the reason
}; };
// attempt to match the type variables in formal to actual, adding them to type_env. // attempt to match the type variables in formal to actual, adding them to type_env.

@ -75,7 +75,7 @@ struct SharedType;
// Use this to customize how a Type is printed using `annotation_str()`. If // Use this to customize how a Type is printed using `annotation_str()`. If
// c10::nullopt is returned, `annotation_str()` falls through to its default // c10::nullopt is returned, `annotation_str()` falls through to its default
// implementation. // implementation.
using TypePrinter = std::function<c10::optional<std::string>(const Type&)>; using TypePrinter = std::function<std::optional<std::string>(const Type&)>;
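For reference, a minimal sketch of how the TypePrinter hook above can be used; the lambda and the name `tensor_renamer` are illustrative, not part of this change:

```cpp
#include <optional>
#include <string>
#include <ATen/core/jit_type.h>

// A printer that renames tensor annotations and defers to the default
// printing for every other type by returning a disengaged optional.
c10::TypePrinter tensor_renamer = [](const c10::Type& t) -> std::optional<std::string> {
  if (t.kind() == c10::TypeKind::TensorType) {
    return std::string("MyTensor");
  }
  return std::nullopt;  // fall through to the default annotation_str()
};
```

Passing such a printer to `annotation_str()` only overrides the cases where it returns a value.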
namespace detail { namespace detail {
template <typename T> template <typename T>
@ -688,7 +688,7 @@ using NamedTypePtr = std::shared_ptr<NamedType>;
using ConstNamedTypePtr = std::shared_ptr<const NamedType>; using ConstNamedTypePtr = std::shared_ptr<const NamedType>;
struct TORCH_API NamedType : public SharedType { struct TORCH_API NamedType : public SharedType {
NamedType(TypeKind tk, c10::optional<QualifiedName> name) NamedType(TypeKind tk, std::optional<QualifiedName> name)
: SharedType(tk), name_(std::move(name)) { : SharedType(tk), name_(std::move(name)) {
TORCH_INTERNAL_ASSERT( TORCH_INTERNAL_ASSERT(
tk == TypeKind::TupleType || tk == TypeKind::FunctionType || tk == TypeKind::TupleType || tk == TypeKind::FunctionType ||
@ -700,12 +700,12 @@ struct TORCH_API NamedType : public SharedType {
// Fully qualified name of type // Fully qualified name of type
// Looks like: "foo.bar.Baz". // Looks like: "foo.bar.Baz".
const c10::optional<QualifiedName>& name() const { const std::optional<QualifiedName>& name() const {
return name_; return name_;
} }
private: private:
c10::optional<QualifiedName> name_; std::optional<QualifiedName> name_;
}; };
} // namespace c10 } // namespace c10

@ -42,7 +42,7 @@ namespace {
constexpr auto CatchAll = c10::DispatchKey::CatchAll; constexpr auto CatchAll = c10::DispatchKey::CatchAll;
} // anonymous namespace } // anonymous namespace
CppFunction::CppFunction(c10::KernelFunction func, c10::optional<c10::impl::CppSignature> cpp_signature, std::unique_ptr<c10::FunctionSchema> schema) CppFunction::CppFunction(c10::KernelFunction func, std::optional<c10::impl::CppSignature> cpp_signature, std::unique_ptr<c10::FunctionSchema> schema)
: func_(std::move(func)) : func_(std::move(func))
, cpp_signature_(cpp_signature) , cpp_signature_(cpp_signature)
, schema_(std::move(schema)) , schema_(std::move(schema))
@ -57,10 +57,10 @@ void Library::reset() {
#define ERROR_CONTEXT "(Error occurred while processing ", toString(kind_), " block at ", file_, ":", line_, ")" #define ERROR_CONTEXT "(Error occurred while processing ", toString(kind_), " block at ", file_, ":", line_, ")"
Library::Library(Kind kind, std::string ns, c10::optional<c10::DispatchKey> k, const char* file, uint32_t line) Library::Library(Kind kind, std::string ns, std::optional<c10::DispatchKey> k, const char* file, uint32_t line)
: kind_(kind) : kind_(kind)
, ns_(ns == "_" ? c10::nullopt : c10::make_optional(std::move(ns))) , ns_(ns == "_" ? c10::nullopt : c10::make_optional(std::move(ns)))
, dispatch_key_(k.value_or(CatchAll) == CatchAll ? c10::optional<c10::DispatchKey>() : k) , dispatch_key_(k.value_or(CatchAll) == CatchAll ? std::optional<c10::DispatchKey>() : k)
, file_(file) , file_(file)
, line_(line) , line_(line)
{ {
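For context, these constructors are normally reached through the registration macros rather than called directly; a hedged sketch with made-up names:

```cpp
#include <ATen/ATen.h>
#include <torch/library.h>

// "myops" and my_add are illustrative, not part of this change.
at::Tensor my_add(const at::Tensor& a, const at::Tensor& b) {
  return a + b;
}

TORCH_LIBRARY(myops, m) {            // builds a Library for the "myops" namespace
  m.def("my_add(Tensor a, Tensor b) -> Tensor");
}

TORCH_LIBRARY_IMPL(myops, CPU, m) {  // builds a Library keyed on the CPU dispatch key
  m.impl("my_add", my_add);
}
```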

@ -43,7 +43,7 @@ FunctionSchema make_function_schema(
} // namespace infer_schema } // namespace infer_schema
} // namespace detail } // namespace detail
c10::optional<std::string> findSchemaDifferences( std::optional<std::string> findSchemaDifferences(
const FunctionSchema& lhs, const FunctionSchema& lhs,
const FunctionSchema& rhs) { const FunctionSchema& rhs) {
if (lhs.arguments().size() != rhs.arguments().size()) { if (lhs.arguments().size() != rhs.arguments().size()) {

@ -155,6 +155,6 @@ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&&
return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name)); return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
} }
TORCH_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); TORCH_API std::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
} }
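A hedged sketch of what this helper reports; `torch::jit::parseSchema` is assumed to be available from the schema parser, and `my::op` is a made-up operator:

```cpp
#include <iostream>
#include <ATen/core/op_registration/infer_schema.h>
#include <torch/csrc/jit/frontend/function_schema_parser.h>

void schema_diff_example() {
  auto specified = torch::jit::parseSchema("my::op(Tensor a, int b) -> Tensor");
  auto inferred  = torch::jit::parseSchema("my::op(Tensor a, float b) -> Tensor");
  // Disengaged when the schemas agree; otherwise a readable description of a mismatch.
  if (auto diff = c10::findSchemaDifferences(inferred, specified)) {
    std::cout << *diff << '\n';
  }
}
```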

@ -17,9 +17,9 @@ void build_feature_required_feature_not_available(const char* feature) {
} // namespace impl } // namespace impl
static_assert(std::is_nothrow_move_constructible< static_assert(std::is_nothrow_move_constructible<
c10::optional<RegistrationHandleRAII>>::value); std::optional<RegistrationHandleRAII>>::value);
static_assert(std::is_nothrow_move_assignable< static_assert(std::is_nothrow_move_assignable<
c10::optional<RegistrationHandleRAII>>::value); std::optional<RegistrationHandleRAII>>::value);
void RegisterOperators::checkSchemaAndRegisterOp_(Options&& options) { void RegisterOperators::checkSchemaAndRegisterOp_(Options&& options) {
TORCH_CHECK( TORCH_CHECK(
@ -71,7 +71,7 @@ c10::FunctionSchema RegisterOperators::inferSchemaFromKernels_(
opName, opName,
" because there is no kernel specified."); " because there is no kernel specified.");
c10::optional<FunctionSchema> inferred_schema = c10::nullopt; std::optional<FunctionSchema> inferred_schema = c10::nullopt;
for (const auto& kernel : options.kernels) { for (const auto& kernel : options.kernels) {
if (nullptr != kernel.inferred_function_schema.get()) { if (nullptr != kernel.inferred_function_schema.get()) {
if (!inferred_schema.has_value()) { if (!inferred_schema.has_value()) {

@ -399,7 +399,7 @@ public:
} }
private: private:
Options&& kernel(c10::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && { Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
KernelRegistrationConfig config; KernelRegistrationConfig config;
config.dispatch_key = dispatch_key; config.dispatch_key = dispatch_key;
config.func = std::move(func); config.func = std::move(func);
@ -425,13 +425,13 @@ public:
, inferred_function_schema(nullptr) , inferred_function_schema(nullptr)
{} {}
c10::optional<DispatchKey> dispatch_key; std::optional<DispatchKey> dispatch_key;
KernelFunction func; KernelFunction func;
c10::optional<impl::CppSignature> cpp_signature; std::optional<impl::CppSignature> cpp_signature;
std::unique_ptr<FunctionSchema> inferred_function_schema; std::unique_ptr<FunctionSchema> inferred_function_schema;
}; };
c10::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_; std::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;
std::vector<KernelRegistrationConfig> kernels; std::vector<KernelRegistrationConfig> kernels;
optional<AliasAnalysisKind> aliasAnalysisKind_; optional<AliasAnalysisKind> aliasAnalysisKind_;

@ -882,56 +882,56 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// optional types (with has_value() == true) // optional types (with has_value() == true)
testArgTypes<c10::optional<double>>::test( testArgTypes<std::optional<double>>::test(
c10::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());}, std::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
c10::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());}, std::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float? a) -> float?"); "(float? a) -> float?");
testArgTypes<c10::optional<int64_t>>::test( testArgTypes<std::optional<int64_t>>::test(
c10::optional<int64_t>(1), [] (const c10::optional<int64_t>& v) {EXPECT_EQ(1, v.value());}, std::optional<int64_t>(1), [] (const c10::optional<int64_t>& v) {EXPECT_EQ(1, v.value());},
c10::optional<int64_t>(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());}, std::optional<int64_t>(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());},
"(int? a) -> int?"); "(int? a) -> int?");
testArgTypes<c10::optional<bool>>::test( testArgTypes<std::optional<bool>>::test(
c10::optional<bool>(true), [] (const c10::optional<bool>& v) {EXPECT_EQ(true, v.value());}, std::optional<bool>(true), [] (const c10::optional<bool>& v) {EXPECT_EQ(true, v.value());},
c10::optional<bool>(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());}, std::optional<bool>(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());},
"(bool? a) -> bool?"); "(bool? a) -> bool?");
testArgTypes<c10::optional<bool>>::test( testArgTypes<std::optional<bool>>::test(
c10::optional<bool>(false), [] (const c10::optional<bool>& v) {EXPECT_EQ(false, v.value());}, std::optional<bool>(false), [] (const c10::optional<bool>& v) {EXPECT_EQ(false, v.value());},
c10::optional<bool>(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());}, std::optional<bool>(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());},
"(bool? a) -> bool?"); "(bool? a) -> bool?");
testArgTypes<c10::optional<std::string>>::test( testArgTypes<std::optional<std::string>>::test(
c10::optional<std::string>("string1"), [] (const c10::optional<std::string>& v) {EXPECT_EQ("string1", v.value());}, std::optional<std::string>("string1"), [] (const c10::optional<std::string>& v) {EXPECT_EQ("string1", v.value());},
c10::optional<std::string>("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());}, std::optional<std::string>("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());},
"(str? a) -> str?"); "(str? a) -> str?");
testArgTypes<c10::optional<Tensor>>::test( testArgTypes<std::optional<Tensor>>::test(
c10::optional<Tensor>(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional<Tensor>& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));}, std::optional<Tensor>(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional<Tensor>& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));},
c10::optional<Tensor>(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));}, std::optional<Tensor>(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));},
"(Tensor? a) -> Tensor?"); "(Tensor? a) -> Tensor?");
// optional types (with has_value() == false) // optional types (with has_value() == false)
testArgTypes<c10::optional<double>>::test( testArgTypes<std::optional<double>>::test(
c10::optional<double>(c10::nullopt), [] (const c10::optional<double>& v) {EXPECT_FALSE(v.has_value());}, std::optional<double>(c10::nullopt), [] (const c10::optional<double>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<double>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<double>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(float? a) -> float?"); "(float? a) -> float?");
testArgTypes<c10::optional<int64_t>>::test( testArgTypes<std::optional<int64_t>>::test(
c10::optional<int64_t>(c10::nullopt), [] (const c10::optional<int64_t>& v) {EXPECT_FALSE(v.has_value());}, std::optional<int64_t>(c10::nullopt), [] (const c10::optional<int64_t>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<int64_t>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<int64_t>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(int? a) -> int?"); "(int? a) -> int?");
testArgTypes<c10::optional<bool>>::test( testArgTypes<std::optional<bool>>::test(
c10::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());}, std::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(bool? a) -> bool?"); "(bool? a) -> bool?");
testArgTypes<c10::optional<bool>>::test( testArgTypes<std::optional<bool>>::test(
c10::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());}, std::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(bool? a) -> bool?"); "(bool? a) -> bool?");
testArgTypes<c10::optional<std::string>>::test( testArgTypes<std::optional<std::string>>::test(
c10::optional<std::string>(c10::nullopt), [] (const c10::optional<std::string>& v) {EXPECT_FALSE(v.has_value());}, std::optional<std::string>(c10::nullopt), [] (const c10::optional<std::string>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<std::string>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<std::string>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(str? a) -> str?"); "(str? a) -> str?");
testArgTypes<c10::optional<Tensor>>::test( testArgTypes<std::optional<Tensor>>::test(
c10::optional<Tensor>(c10::nullopt), [] (const c10::optional<Tensor>& v) {EXPECT_FALSE(v.has_value());}, std::optional<Tensor>(c10::nullopt), [] (const c10::optional<Tensor>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<Tensor>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<Tensor>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(Tensor? a) -> Tensor?"); "(Tensor? a) -> Tensor?");
@ -1136,21 +1136,21 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
"(Tensor[] a) -> Tensor[]"); "(Tensor[] a) -> Tensor[]");
// Test optional of list (with nullopt) // Test optional of list (with nullopt)
testArgTypes<c10::optional<c10::List<int64_t>>>::test( testArgTypes<std::optional<c10::List<int64_t>>>::test(
c10::optional<c10::List<int64_t>>(c10::nullopt), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_FALSE(v.has_value());}, std::optional<c10::List<int64_t>>(c10::nullopt), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<c10::List<int64_t>>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, std::optional<c10::List<int64_t>>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(int[]? a) -> int[]?"); "(int[]? a) -> int[]?");
// Test optional of list (with empty list) // Test optional of list (with empty list)
testArgTypes<c10::optional<c10::List<int64_t>>>::test( testArgTypes<std::optional<c10::List<int64_t>>>::test(
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_EQ(0, v.value().size());}, std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_EQ(0, v.value().size());},
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<int64_t>>().size());}, std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<int64_t>>().size());},
"(int[]? a) -> int[]?"); "(int[]? a) -> int[]?");
// Test optional of list (with values) // Test optional of list (with values)
testArgTypes<c10::optional<c10::List<int64_t>>>::test( testArgTypes<std::optional<c10::List<int64_t>>>::test(
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({1, 2})), [] (const c10::optional<c10::List<int64_t>>& v) {expectListEquals({1, 2}, v.value());}, std::optional<c10::List<int64_t>>(c10::List<int64_t>({1, 2})), [] (const c10::optional<c10::List<int64_t>>& v) {expectListEquals({1, 2}, v.value());},
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to<c10::List<int64_t>>());}, std::optional<c10::List<int64_t>>(c10::List<int64_t>({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to<c10::List<int64_t>>());},
"(int[]? a) -> int[]?"); "(int[]? a) -> int[]?");
// Test list of optional (with empty list) // Test list of optional (with empty list)
@ -1161,8 +1161,8 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// Test list of optional (with values) // Test list of optional (with values)
testArgTypes<c10::List<::std::optional<int64_t>>>::test( testArgTypes<c10::List<::std::optional<int64_t>>>::test(
c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional<int64_t>>& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v);}, c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional<int64_t>>& v) {expectListEquals<std::optional<int64_t>>({3, c10::nullopt, 2}, v);},
c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v.to<c10::List<::std::optional<int64_t>>>());}, c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals<std::optional<int64_t>>({3, c10::nullopt, 2}, v.to<c10::List<::std::optional<int64_t>>>());},
"(int?[] a) -> int?[]"); "(int?[] a) -> int?[]");
// dict types // dict types

@ -23,7 +23,7 @@ struct OperatorName final {
// Return the namespace of this OperatorName, if it exists. The // Return the namespace of this OperatorName, if it exists. The
// returned string_view is only live as long as the OperatorName // returned string_view is only live as long as the OperatorName
// exists and name is not mutated // exists and name is not mutated
c10::optional<c10::string_view> getNamespace() const { std::optional<c10::string_view> getNamespace() const {
auto pos = name.find("::"); auto pos = name.find("::");
if (pos == std::string::npos) { if (pos == std::string::npos) {
return c10::nullopt; return c10::nullopt;
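A hedged usage sketch of the accessor above; `aten::add` is just an example operator name:

```cpp
#include <iostream>
#include <ATen/core/operator_name.h>

void namespace_example() {
  c10::OperatorName op{"aten::add", "Tensor"};
  // The returned string_view aliases op.name, so op must outlive it.
  if (auto ns = op.getNamespace()) {
    std::cout << *ns << '\n';  // prints "aten"
  }
}
```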

@ -274,12 +274,12 @@ TensorTypePtr TensorType::create(const at::Tensor& t) {
} }
TensorTypePtr TensorType::create( TensorTypePtr TensorType::create(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
const VaryingShape<int64_t>& sizes, const VaryingShape<int64_t>& sizes,
const VaryingShape<int64_t>& strides, const VaryingShape<int64_t>& strides,
c10::optional<bool> requires_grad, std::optional<bool> requires_grad,
c10::optional<bool> undefined, bool tensor_contiguity) { std::optional<bool> undefined, bool tensor_contiguity) {
if(strides.concrete_sizes() && strides.concrete_sizes().has_value()){ if(strides.concrete_sizes() && strides.concrete_sizes().has_value()){
// handles case where strides are set // handles case where strides are set
// NOLINTNEXTLINE(bugprone-unchecked-optional-access) // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
@ -304,22 +304,22 @@ TensorTypePtr TensorType::create(
} }
TensorTypePtr TensorType::create( TensorTypePtr TensorType::create(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
const SymbolicShape& sizes, const SymbolicShape& sizes,
const VaryingShape<Stride>& strides, const VaryingShape<Stride>& strides,
c10::optional<bool> requires_grad, std::optional<bool> requires_grad,
c10::optional<bool> undefined) { std::optional<bool> undefined) {
auto pt = TensorTypePtr(new TensorType( auto pt = TensorTypePtr(new TensorType(
scalar_type, device, sizes, strides, requires_grad, undefined)); scalar_type, device, sizes, strides, requires_grad, undefined));
return pt; return pt;
} }
TensorTypePtr TensorType::create( TensorTypePtr TensorType::create(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
c10::optional<size_t> dim, std::optional<size_t> dim,
c10::optional<bool> requires_grad) { std::optional<bool> requires_grad) {
return TensorType::create( return TensorType::create(
scalar_type, scalar_type,
device, device,
@ -349,7 +349,7 @@ VaryingShape<int64_t> TensorType::sizes() const {
fmap(*sizes_.sizes(), [](ShapeSymbol ss) { fmap(*sizes_.sizes(), [](ShapeSymbol ss) {
// we turn symbolic shapes into unknowns // we turn symbolic shapes into unknowns
return ss.is_static() return ss.is_static()
? c10::optional<int64_t>(ss.static_size()) ? std::optional<int64_t>(ss.static_size())
: c10::nullopt; : c10::nullopt;
})); }));
} }
@ -371,7 +371,7 @@ TensorTypePtr TensorType::merge(const TensorType& other, bool merge_sizes) const
} }
template <typename T> template <typename T>
bool is_null_or_equal(c10::optional<T> a, c10::IntArrayRef b) { bool is_null_or_equal(std::optional<T> a, c10::IntArrayRef b) {
return !a.has_value() || a.value() == b; return !a.has_value() || a.value() == b;
} }
@ -417,7 +417,7 @@ VaryingShape<int64_t> TensorType::strides() const {
if (!strides_.size().has_value()) { if (!strides_.size().has_value()) {
return VaryingShape<int64_t>(); return VaryingShape<int64_t>();
} }
std::vector<c10::optional<int64_t>> ss(*strides_.size()); std::vector<std::optional<int64_t>> ss(*strides_.size());
for (size_t i = 0; i < *strides_.size(); i++) { for (size_t i = 0; i < *strides_.size(); i++) {
if (!strides_[i].has_value()) { if (!strides_[i].has_value()) {
continue; continue;
@ -431,12 +431,12 @@ VaryingShape<int64_t> TensorType::strides() const {
} }
TensorType::TensorType( TensorType::TensorType(
c10::optional<at::ScalarType> scalar_type, std::optional<at::ScalarType> scalar_type,
c10::optional<Device> device, std::optional<Device> device,
SymbolicShape sizes, SymbolicShape sizes,
VaryingShape<Stride> strides, VaryingShape<Stride> strides,
c10::optional<bool> requires_grad, std::optional<bool> requires_grad,
c10::optional<bool> undefined) std::optional<bool> undefined)
: SharedType(TypeKind::TensorType), : SharedType(TypeKind::TensorType),
scalar_type_(scalar_type), scalar_type_(scalar_type),
device_(device), device_(device),

@ -364,7 +364,7 @@ SymBoolTypePtr SymBoolType::get() {
return value; return value;
} }
static c10::optional<TypePtr> unifyTypesImpl(const TypePtr& t1, const TypePtr& t2, bool default_to_union=false, const TypePtr& type_hint=nullptr) { static std::optional<TypePtr> unifyTypesImpl(const TypePtr& t1, const TypePtr& t2, bool default_to_union=false, const TypePtr& type_hint=nullptr) {
// check direct subtyping relation // check direct subtyping relation
if (t1->isSubtypeOf(*t2)) { if (t1->isSubtypeOf(*t2)) {
return t2; return t2;
@ -446,7 +446,7 @@ static c10::optional<TypePtr> unifyTypesImpl(const TypePtr& t1, const TypePtr& t
return c10::nullopt; return c10::nullopt;
} }
c10::optional<TypePtr> unifyTypes(const TypePtr& t1, const TypePtr& t2, bool default_to_union, const TypePtr& type_hint) { std::optional<TypePtr> unifyTypes(const TypePtr& t1, const TypePtr& t2, bool default_to_union, const TypePtr& type_hint) {
auto unified = unifyTypesImpl(t1, t2, default_to_union, type_hint); auto unified = unifyTypesImpl(t1, t2, default_to_union, type_hint);
if (default_to_union && !unified) { if (default_to_union && !unified) {
@ -456,7 +456,7 @@ c10::optional<TypePtr> unifyTypes(const TypePtr& t1, const TypePtr& t2, bool def
return unified; return unified;
} }
c10::optional<TypePtr> unifyTypeList( std::optional<TypePtr> unifyTypeList(
at::ArrayRef<TypePtr> elements, at::ArrayRef<TypePtr> elements,
std::ostream& why_not, std::ostream& why_not,
bool default_to_union, bool default_to_union,
@ -468,7 +468,7 @@ c10::optional<TypePtr> unifyTypeList(
TypePtr ret_type = elements.at(0); TypePtr ret_type = elements.at(0);
for (size_t i = 1; i < elements.size() && ret_type; ++i) { for (size_t i = 1; i < elements.size() && ret_type; ++i) {
c10::optional<TypePtr> maybe_unified = unifyTypes(ret_type, elements.at(i), default_to_union, type_hint); std::optional<TypePtr> maybe_unified = unifyTypes(ret_type, elements.at(i), default_to_union, type_hint);
if (!maybe_unified) { if (!maybe_unified) {
why_not << "Could not unify type list since element " << i << " of type " why_not << "Could not unify type list since element " << i << " of type "
<< elements.at(i)->repr_str() << elements.at(i)->repr_str()
@ -719,7 +719,7 @@ bool Type::is_module() const {
} }
TupleTypePtr TupleType::createNamed( TupleTypePtr TupleType::createNamed(
const c10::optional<c10::QualifiedName>& qualName, const std::optional<c10::QualifiedName>& qualName,
const std::vector<std::string>& field_names, const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types) { const std::vector<TypePtr>& field_types) {
std::vector<IValue> empty_defaults; std::vector<IValue> empty_defaults;
@ -727,7 +727,7 @@ TupleTypePtr TupleType::createNamed(
} }
TupleTypePtr TupleType::createNamed( TupleTypePtr TupleType::createNamed(
const c10::optional<c10::QualifiedName>& qualName, const std::optional<c10::QualifiedName>& qualName,
const std::vector<c10::string_view>& field_names, const std::vector<c10::string_view>& field_names,
const std::vector<TypePtr>& field_types) { const std::vector<TypePtr>& field_types) {
std::vector<IValue> empty_defaults; std::vector<IValue> empty_defaults;
@ -735,7 +735,7 @@ TupleTypePtr TupleType::createNamed(
} }
TupleTypePtr TupleType::createNamed( TupleTypePtr TupleType::createNamed(
const c10::optional<c10::QualifiedName>& qualName, const std::optional<c10::QualifiedName>& qualName,
const std::vector<std::string>& field_names, const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types, const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults) { std::vector<IValue>& field_defaults) {
@ -743,7 +743,7 @@ TupleTypePtr TupleType::createNamed(
} }
template <typename S> template <typename S>
TupleTypePtr TupleType::createWithSpec(const c10::optional<c10::QualifiedName>& qualName, TupleTypePtr TupleType::createWithSpec(const std::optional<c10::QualifiedName>& qualName,
const std::vector<S>& field_names, const std::vector<S>& field_names,
const std::vector<TypePtr>& field_types, const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults) { std::vector<IValue>& field_defaults) {
@ -784,7 +784,7 @@ TupleTypePtr TupleType::createWithSpec(const c10::optional<c10::QualifiedName>&
field_types, qualName, std::move(schema))); // NOLINT(modernize-make-shared) field_types, qualName, std::move(schema))); // NOLINT(modernize-make-shared)
} }
c10::optional<std::vector<c10::string_view>> TupleType::names() const { std::optional<std::vector<c10::string_view>> TupleType::names() const {
if (!schema_) { if (!schema_) {
return {}; return {};
} }
@ -820,7 +820,7 @@ bool NumberType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const {
TupleType::TupleType( TupleType::TupleType(
std::vector<TypePtr> elements, std::vector<TypePtr> elements,
c10::optional<c10::QualifiedName> name, std::optional<c10::QualifiedName> name,
std::shared_ptr<FunctionSchema> schema) std::shared_ptr<FunctionSchema> schema)
: NamedType(TypeKind::TupleType, std::move(name)), : NamedType(TypeKind::TupleType, std::move(name)),
elements_(std::move(elements)), elements_(std::move(elements)),

@ -29,7 +29,7 @@ ListTypePtr ListType::ofOptionalTensors() {
namespace { namespace {
c10::optional<TypePtr> subtractTypeSetFrom(std::vector<TypePtr>& to_subtract, ArrayRef<TypePtr> from) { std::optional<TypePtr> subtractTypeSetFrom(std::vector<TypePtr>& to_subtract, ArrayRef<TypePtr> from) {
std::vector<TypePtr> types; std::vector<TypePtr> types;
// Given a TypePtr `lhs`, this function says whether or not `lhs` (or // Given a TypePtr `lhs`, this function says whether or not `lhs` (or
@ -93,7 +93,7 @@ void filterDuplicateSubtypes(std::vector<TypePtr>* types) {
if (types->empty()) { if (types->empty()) {
return; return;
} }
auto get_supertype = [](const TypePtr& t1, const TypePtr& t2) -> c10::optional<TypePtr> { auto get_supertype = [](const TypePtr& t1, const TypePtr& t2) -> std::optional<TypePtr> {
// We don't want nested Optionals. Also, prematurely unifying to // We don't want nested Optionals. Also, prematurely unifying to
// `Optional` could prevent us from coalescing other types // `Optional` could prevent us from coalescing other types
if ((t1->isSubtypeOf(*NoneType::get()) && !t2->isSubtypeOf(*NoneType::get())) if ((t1->isSubtypeOf(*NoneType::get()) && !t2->isSubtypeOf(*NoneType::get()))
@ -114,7 +114,7 @@ void filterDuplicateSubtypes(std::vector<TypePtr>* types) {
size_t end_idx = types->size()-1; size_t end_idx = types->size()-1;
for (size_t i = types->size()-1; i > 0; --i) { for (size_t i = types->size()-1; i > 0; --i) {
for (size_t j = std::min(i-1, end_idx); ; --j) { for (size_t j = std::min(i-1, end_idx); ; --j) {
c10::optional<TypePtr> unified; std::optional<TypePtr> unified;
unified = get_supertype((*types)[i], (*types)[j]); unified = get_supertype((*types)[i], (*types)[j]);
if (unified) { if (unified) {
(*types)[j] = *unified; (*types)[j] = *unified;
@ -272,11 +272,11 @@ UnionTypePtr UnionType::create(std::vector<TypePtr> reference) {
return union_type; return union_type;
} }
c10::optional<TypePtr> UnionType::subtractTypeSet(std::vector<TypePtr>& to_subtract) const { std::optional<TypePtr> UnionType::subtractTypeSet(std::vector<TypePtr>& to_subtract) const {
return subtractTypeSetFrom(to_subtract, containedTypes()); return subtractTypeSetFrom(to_subtract, containedTypes());
} }
c10::optional<TypePtr> UnionType::toOptional() const { std::optional<TypePtr> UnionType::toOptional() const {
if (!canHoldType(*NoneType::get())) { if (!canHoldType(*NoneType::get())) {
return c10::nullopt; return c10::nullopt;
} }
@ -432,7 +432,7 @@ bool UnionType::canHoldType(const Type& type) const {
bool OptionalType::equals(const Type& rhs) const { bool OptionalType::equals(const Type& rhs) const {
if (auto union_rhs = rhs.cast<UnionType>()) { if (auto union_rhs = rhs.cast<UnionType>()) {
auto optional_rhs = union_rhs->toOptional(); auto optional_rhs = union_rhs->toOptional();
// `**optional_rhs` = `*` to get value of `c10::optional<TypePtr>`, // `**optional_rhs` = `*` to get value of `std::optional<TypePtr>`,
// then `*` to dereference the pointer // then `*` to dereference the pointer
return optional_rhs && *this == **optional_rhs; return optional_rhs && *this == **optional_rhs;
} else if (auto optional_rhs = rhs.cast<OptionalType>()) { } else if (auto optional_rhs = rhs.cast<OptionalType>()) {
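The double dereference called out in the comment is plain optional-of-pointer unwrapping; in isolation:

```cpp
#include <memory>
#include <optional>

void double_deref_example() {
  std::optional<std::shared_ptr<int>> p = std::make_shared<int>(7);
  int value = **p;  // first * unwraps the optional, second * the shared_ptr
  (void)value;
}
```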

@ -105,7 +105,7 @@ struct CUDACachingHostAllocatorImpl
} }
void record_stream( void record_stream(
c10::optional<std::vector<EventPool::Event>>& events, std::optional<std::vector<EventPool::Event>>& events,
CUDAStream stream) override { CUDAStream stream) override {
auto event = create_event_internal(stream.device_index()); auto event = create_event_internal(stream.device_index());
event->record(stream); event->record(stream);

@ -8,8 +8,8 @@ namespace at::detail {
TensorBase empty_cuda( TensorBase empty_cuda(
IntArrayRef size, IntArrayRef size,
ScalarType dtype, ScalarType dtype,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
at::globalContext().lazyInitCUDA(); at::globalContext().lazyInitCUDA();
const auto device = device_or_default(device_opt); const auto device = device_or_default(device_opt);
TORCH_INTERNAL_ASSERT(device.is_cuda()); TORCH_INTERNAL_ASSERT(device.is_cuda());
@ -22,11 +22,11 @@ TensorBase empty_cuda(
TensorBase empty_cuda( TensorBase empty_cuda(
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt) { std::optional<c10::MemoryFormat> memory_format_opt) {
TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned"); TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@ -49,7 +49,7 @@ TensorBase empty_strided_cuda(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
ScalarType dtype, ScalarType dtype,
c10::optional<Device> device_opt) { std::optional<Device> device_opt) {
at::globalContext().lazyInitCUDA(); at::globalContext().lazyInitCUDA();
const auto device = device_or_default(device_opt); const auto device = device_or_default(device_opt);
TORCH_INTERNAL_ASSERT(device.is_cuda()); TORCH_INTERNAL_ASSERT(device.is_cuda());
@ -63,10 +63,10 @@ TensorBase empty_strided_cuda(
TensorBase empty_strided_cuda( TensorBase empty_strided_cuda(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) { std::optional<bool> pin_memory_opt) {
TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned"); TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

@ -6,16 +6,16 @@ namespace at::detail {
TORCH_CUDA_CPP_API TensorBase empty_cuda( TORCH_CUDA_CPP_API TensorBase empty_cuda(
IntArrayRef size, IntArrayRef size,
ScalarType dtype, ScalarType dtype,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_CUDA_CPP_API TensorBase empty_cuda( TORCH_CUDA_CPP_API TensorBase empty_cuda(
IntArrayRef size, IntArrayRef size,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt, std::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt); std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_CUDA_CPP_API TensorBase empty_cuda( TORCH_CUDA_CPP_API TensorBase empty_cuda(
IntArrayRef size, IntArrayRef size,
@ -25,15 +25,15 @@ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
ScalarType dtype, ScalarType dtype,
c10::optional<Device> device_opt); std::optional<Device> device_opt);
TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
IntArrayRef size, IntArrayRef size,
IntArrayRef stride, IntArrayRef stride,
c10::optional<ScalarType> dtype_opt, std::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, std::optional<Layout> layout_opt,
c10::optional<Device> device_opt, std::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt); std::optional<bool> pin_memory_opt);
TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
IntArrayRef size, IntArrayRef size,

@ -8,13 +8,13 @@
namespace at::native { namespace at::native {
bool is_pinned_cuda(const Tensor& self, c10::optional<Device> device) { bool is_pinned_cuda(const Tensor& self, std::optional<Device> device) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda()); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda());
// TODO: unhook this // TODO: unhook this
return detail::getCUDAHooks().isPinnedPtr(self.storage().data()); return detail::getCUDAHooks().isPinnedPtr(self.storage().data());
} }
Tensor _pin_memory_cuda(const Tensor& self, c10::optional<Device> device) { Tensor _pin_memory_cuda(const Tensor& self, std::optional<Device> device) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda()); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda());
auto* allocator = at::cuda::getPinnedMemoryAllocator(); auto* allocator = at::cuda::getPinnedMemoryAllocator();
auto storage = Storage( auto storage = Storage(

@ -22,9 +22,9 @@ std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
_cudnn_rnn_cast_reflatten(const Tensor & input, _cudnn_rnn_cast_reflatten(const Tensor & input,
TensorList weight, TensorList weight,
int64_t weight_stride0, int64_t weight_stride0,
const c10::optional<Tensor>& weight_buf_opt, const std::optional<Tensor>& weight_buf_opt,
const Tensor& hx, const Tensor& hx,
const c10::optional<Tensor>& cx, const std::optional<Tensor>& cx,
int64_t mode, int64_t mode,
int64_t hidden_size, int64_t hidden_size,
int64_t proj_size, int64_t proj_size,
@ -34,7 +34,7 @@ _cudnn_rnn_cast_reflatten(const Tensor & input,
bool train, bool train,
bool bidirectional, bool bidirectional,
IntArrayRef batch_sizes, IntArrayRef batch_sizes,
const c10::optional<Tensor>& dropout_state) { const std::optional<Tensor>& dropout_state) {
#if AT_CUDNN_ENABLED() #if AT_CUDNN_ENABLED()
c10::impl::ExcludeDispatchKeyGuard no_autocast(DispatchKey::Autocast); c10::impl::ExcludeDispatchKeyGuard no_autocast(DispatchKey::Autocast);

@ -303,7 +303,7 @@ static std::tuple<Tensor, optional<int64_t>> log_sigmoid_backward_batch_rule(
return std::make_tuple(at::log_sigmoid_backward(out_grad, out_self, out_buffer), 0); return std::make_tuple(at::log_sigmoid_backward(out_grad, out_self, out_buffer), 0);
} }
static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen) { static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, std::optional<Generator> gen) {
return at::binomial(count, prob.contiguous(), std::move(gen)); // Bug in PyTorch, prob shouldn't need to be contiguous return at::binomial(count, prob.contiguous(), std::move(gen)); // Bug in PyTorch, prob shouldn't need to be contiguous
} }
@ -457,7 +457,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
using TensorScalarInplaceT = Tensor& (Tensor::*)(const Tensor&, const Scalar&) const; using TensorScalarInplaceT = Tensor& (Tensor::*)(const Tensor&, const Scalar&) const;
using ScalarScalarInplaceT = Tensor& (Tensor::*)(const Scalar&, const Scalar&) const; using ScalarScalarInplaceT = Tensor& (Tensor::*)(const Scalar&, const Scalar&) const;
using TensorInplaceT = Tensor& (Tensor::*)(const Tensor&) const; using TensorInplaceT = Tensor& (Tensor::*)(const Tensor&) const;
using TensorInplaceModeT = Tensor& (Tensor::*)(const Tensor&, c10::optional<c10::string_view>) const; using TensorInplaceModeT = Tensor& (Tensor::*)(const Tensor&, std::optional<c10::string_view>) const;
using ScalarInplaceT = Tensor& (Tensor::*)(const Scalar&) const; using ScalarInplaceT = Tensor& (Tensor::*)(const Scalar&) const;
using CopyT = Tensor& (Tensor::*)(const Tensor&, bool) const; using CopyT = Tensor& (Tensor::*)(const Tensor&, bool) const;
@ -471,7 +471,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
VMAP_SUPPORT2(mul_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::mul_>)); VMAP_SUPPORT2(mul_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::mul_>));
VMAP_SUPPORT2(mul_, Scalar, SINGLE_ARG(unary_inplace_batch_rule<ScalarInplaceT, &Tensor::mul_, const Scalar&>)); VMAP_SUPPORT2(mul_, Scalar, SINGLE_ARG(unary_inplace_batch_rule<ScalarInplaceT, &Tensor::mul_, const Scalar&>));
VMAP_SUPPORT2(div_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::div_>)); VMAP_SUPPORT2(div_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::div_>));
VMAP_SUPPORT2(div_, Tensor_mode, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceModeT, &Tensor::div_, c10::optional<c10::string_view>>)); VMAP_SUPPORT2(div_, Tensor_mode, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceModeT, &Tensor::div_, std::optional<c10::string_view>>));
VMAP_SUPPORT2(div_, Scalar, SINGLE_ARG(unary_inplace_batch_rule<ScalarInplaceT, &Tensor::div_, const Scalar&>)); VMAP_SUPPORT2(div_, Scalar, SINGLE_ARG(unary_inplace_batch_rule<ScalarInplaceT, &Tensor::div_, const Scalar&>));
VMAP_SUPPORT2(clamp_min_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::clamp_min_>)); VMAP_SUPPORT2(clamp_min_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::clamp_min_>));
VMAP_SUPPORT2(clamp_max_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::clamp_max_>)); VMAP_SUPPORT2(clamp_max_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::clamp_max_>));

@ -124,7 +124,7 @@ convolution_batch_rule(const Tensor& lhs, optional<int64_t> lhs_bdim, const Tens
} }
static Tensor _convolution_decomp( static Tensor _convolution_decomp(
const Tensor& input_r, const Tensor& weight_r, const c10::optional<Tensor>& bias_r_opt, const Tensor& input_r, const Tensor& weight_r, const std::optional<Tensor>& bias_r_opt,
IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_, IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_,
bool transposed_, IntArrayRef output_padding_, int64_t groups_, bool transposed_, IntArrayRef output_padding_, int64_t groups_,
bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {

@ -107,11 +107,11 @@ static std::tuple<Tensor,optional<int64_t>> linspace_logspace_batch_rule_helper(
const at::Tensor& start, optional<int64_t> start_bdim, const at::Tensor& start, optional<int64_t> start_bdim,
const at::Tensor& end, optional<int64_t> end_bdim, const at::Tensor& end, optional<int64_t> end_bdim,
int64_t steps, int64_t steps,
c10::optional<double> base, std::optional<double> base,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory) std::optional<bool> pin_memory)
{ {
auto batch_size = get_bdim_size2(start, start_bdim, end, end_bdim); auto batch_size = get_bdim_size2(start, start_bdim, end, end_bdim);
auto start_ = ensure_has_bdim(start, start_bdim.has_value(), batch_size); auto start_ = ensure_has_bdim(start, start_bdim.has_value(), batch_size);
@ -145,10 +145,10 @@ static std::tuple<Tensor,optional<int64_t>> linspace_Tensor_Tensor_batch_rule(
const at::Tensor& start, optional<int64_t> start_bdim, const at::Tensor& start, optional<int64_t> start_bdim,
const at::Tensor& end, optional<int64_t> end_bdim, const at::Tensor& end, optional<int64_t> end_bdim,
int64_t steps, int64_t steps,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory){ std::optional<bool> pin_memory){
return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory); return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory);
} }
@ -156,10 +156,10 @@ static std::tuple<Tensor,optional<int64_t>> linspace_Tensor_Scalar_batch_rule(
const at::Tensor& start, optional<int64_t> start_bdim, const at::Tensor& start, optional<int64_t> start_bdim,
const at::Scalar& end, const at::Scalar& end,
int64_t steps, int64_t steps,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory){ std::optional<bool> pin_memory){
auto end_t = at::native::wrapped_scalar_tensor(end, start.device()); auto end_t = at::native::wrapped_scalar_tensor(end, start.device());
return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::nullopt, dtype, layout, device, pin_memory); return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::nullopt, dtype, layout, device, pin_memory);
@ -169,10 +169,10 @@ static std::tuple<Tensor,optional<int64_t>> linspace_Scalar_Tensor_batch_rule(
const at::Scalar& start, const at::Scalar& start,
const at::Tensor& end, optional<int64_t> end_bdim, const at::Tensor& end, optional<int64_t> end_bdim,
int64_t steps, int64_t steps,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory){ std::optional<bool> pin_memory){
auto start_t = at::native::wrapped_scalar_tensor(start, end.device()); auto start_t = at::native::wrapped_scalar_tensor(start, end.device());
return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory); return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory);
@ -183,10 +183,10 @@ static std::tuple<Tensor,optional<int64_t>> logspace_Tensor_Tensor_batch_rule(
const at::Tensor& end, optional<int64_t> end_bdim, const at::Tensor& end, optional<int64_t> end_bdim,
int64_t steps, int64_t steps,
double base, double base,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory){ std::optional<bool> pin_memory){
return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory); return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory);
} }
@ -195,10 +195,10 @@ static std::tuple<Tensor,optional<int64_t>> logspace_Tensor_Scalar_batch_rule(
const at::Scalar& end, const at::Scalar& end,
int64_t steps, int64_t steps,
double base, double base,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory){ std::optional<bool> pin_memory){
auto end_t = at::native::wrapped_scalar_tensor(end, start.device()); auto end_t = at::native::wrapped_scalar_tensor(end, start.device());
return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::make_optional(base), dtype, layout, device, pin_memory); return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::make_optional(base), dtype, layout, device, pin_memory);
@ -209,10 +209,10 @@ static std::tuple<Tensor,optional<int64_t>> logspace_Scalar_Tensor_batch_rule(
const at::Tensor& end, optional<int64_t> end_bdim, const at::Tensor& end, optional<int64_t> end_bdim,
int64_t steps, int64_t steps,
double base, double base,
c10::optional<at::ScalarType> dtype, std::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout, std::optional<at::Layout> layout,
c10::optional<at::Device> device, std::optional<at::Device> device,
c10::optional<bool> pin_memory){ std::optional<bool> pin_memory){
auto start_t = at::native::wrapped_scalar_tensor(start, end.device()); auto start_t = at::native::wrapped_scalar_tensor(start, end.device());
return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory); return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory);

@ -157,9 +157,9 @@ void _linalg_check_errors_batch_rule(const Tensor& info, optional<int64_t> info_
at::_linalg_check_errors(info_, api_name, false); at::_linalg_check_errors(info_, api_name, false);
} }
std::tuple<Tensor, c10::optional<int64_t>> std::tuple<Tensor, std::optional<int64_t>>
householder_product_batch_rule(const Tensor &input, c10::optional<int64_t> input_bdim, householder_product_batch_rule(const Tensor &input, std::optional<int64_t> input_bdim,
const Tensor &tau, c10::optional<int64_t> tau_bdim) const Tensor &tau, std::optional<int64_t> tau_bdim)
{ {
auto input_ = moveBatchDimToFront(input, input_bdim); auto input_ = moveBatchDimToFront(input, input_bdim);
auto tau_ = moveBatchDimToFront(tau, tau_bdim); auto tau_ = moveBatchDimToFront(tau, tau_bdim);
@ -330,8 +330,8 @@ oneOutput linalg_lu_solve_batch_rule(
} }
oneOutput cholesky_solve_batch_rule( oneOutput cholesky_solve_batch_rule(
const Tensor& self, c10::optional<int64_t> self_bdim, const Tensor& self, std::optional<int64_t> self_bdim,
const Tensor& A, c10::optional<int64_t> A_bdim, const Tensor& A, std::optional<int64_t> A_bdim,
bool upper) { bool upper) {
TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2,
"b should have at least 2 dimensions, but has ", self.dim(), " dimensions instead"); "b should have at least 2 dimensions, but has ", self.dim(), " dimensions instead");
@ -345,14 +345,14 @@ oneOutput cholesky_solve_batch_rule(
} }
threeOutputs linalg_lu_factor_ex_batch_rule( threeOutputs linalg_lu_factor_ex_batch_rule(
const Tensor& A, c10::optional<int64_t> A_bdim, bool pivot, bool check_errors) { const Tensor& A, std::optional<int64_t> A_bdim, bool pivot, bool check_errors) {
TORCH_CHECK(rankWithoutBatchDim(A, A_bdim) >= 2, "torch.lu_factor_ex: Expected tensor with 2 or more dimensions. Got size: ", A.sizes(), " instead"); TORCH_CHECK(rankWithoutBatchDim(A, A_bdim) >= 2, "torch.lu_factor_ex: Expected tensor with 2 or more dimensions. Got size: ", A.sizes(), " instead");
const auto A_ = moveBatchDimToFront(A, A_bdim); const auto A_ = moveBatchDimToFront(A, A_bdim);
const auto res = at::linalg_lu_factor_ex(A_, pivot, check_errors); const auto res = at::linalg_lu_factor_ex(A_, pivot, check_errors);
return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0); return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0);
} }
oneOutput matrix_exp_batch_rule(const Tensor& self, c10::optional<int64_t> self_bdim) { oneOutput matrix_exp_batch_rule(const Tensor& self, std::optional<int64_t> self_bdim) {
TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.matrix_exp: The input tensor A must have at least 2 dimensions."); TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.matrix_exp: The input tensor A must have at least 2 dimensions.");
const auto self_ = moveBatchDimToFront(self, self_bdim).contiguous(); // seems to be a bug const auto self_ = moveBatchDimToFront(self, self_bdim).contiguous(); // seems to be a bug
return std::make_tuple(at::matrix_exp(self_), 0); return std::make_tuple(at::matrix_exp(self_), 0);
@ -400,8 +400,8 @@ fourOutputs solve_ex_batch_rule(
return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0, std::get<3>(res), 0); return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0, std::get<3>(res), 0);
} }
oneOutput cross_batch_rule(const Tensor& self, c10::optional<int64_t> self_bdim, oneOutput cross_batch_rule(const Tensor& self, std::optional<int64_t> self_bdim,
const Tensor& other, c10::optional<int64_t> other_bdim, const int64_t dim) { const Tensor& other, std::optional<int64_t> other_bdim, const int64_t dim) {
// match cross dimension checks // match cross dimension checks
TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) == rankWithoutBatchDim(other, other_bdim), TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) == rankWithoutBatchDim(other, other_bdim),
"linalg.cross: inputs must have the same number of dimensions." "linalg.cross: inputs must have the same number of dimensions."
@ -418,16 +418,16 @@ oneOutput cross_batch_rule(const Tensor& self, c10::optional<int64_t> self_bdim,
return std::make_tuple(linalg_cross(self_, other_, dim_), 0); return std::make_tuple(linalg_cross(self_, other_, dim_), 0);
} }
c10::optional<int64_t> batch_dim_if_not_empty(const Tensor& t) { std::optional<int64_t> batch_dim_if_not_empty(const Tensor& t) {
if (t.dim() == 1 && t.size(0) == 0) { if (t.dim() == 1 && t.size(0) == 0) {
return c10::optional<int64_t>(); return std::optional<int64_t>();
} }
return c10::optional<int64_t>(0); return std::optional<int64_t>(0);
} }
fourOutputs linalg_lstsq_batch_rule( fourOutputs linalg_lstsq_batch_rule(
const Tensor& self, c10::optional<int64_t> self_bdim, const Tensor& b, c10::optional<int64_t> b_bdim, const Tensor& self, std::optional<int64_t> self_bdim, const Tensor& b, c10::optional<int64_t> b_bdim,
c10::optional<double> rcond, c10::optional<c10::string_view> driver) { std::optional<double> rcond, c10::optional<c10::string_view> driver) {
TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.linalg.lstsq: input must have at least 2 dimensions."); TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.linalg.lstsq: input must have at least 2 dimensions.");
TORCH_CHECK(rankWithoutBatchDim(b, b_bdim) >= 1, "torch.linalg.lstsq: other must have at least 1 dimension."); TORCH_CHECK(rankWithoutBatchDim(b, b_bdim) >= 1, "torch.linalg.lstsq: other must have at least 1 dimension.");
@ -449,7 +449,7 @@ fourOutputs linalg_lstsq_batch_rule(
} }
template<typename F> template<typename F>
std::tuple<Tensor, c10::optional<int64_t>> std::tuple<Tensor, std::optional<int64_t>>
atol_rtol_tensor_batch_rule( atol_rtol_tensor_batch_rule(
F Func, const Tensor& input, optional<int64_t> input_bdim, F Func, const Tensor& input, optional<int64_t> input_bdim,
const optional<Tensor>& atol, const optional<int64_t> atol_bdim, const optional<Tensor>& atol, const optional<int64_t> atol_bdim,
@ -478,11 +478,11 @@ atol_rtol_tensor_batch_rule(
return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0); return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0);
} }
static std::tuple<Tensor, c10::optional<int64_t>> static std::tuple<Tensor, std::optional<int64_t>>
pinv_batch_rule( pinv_batch_rule(
const Tensor& input, c10::optional<int64_t> input_bdim, const optional<Tensor>& atol, const Tensor& input, std::optional<int64_t> input_bdim, const optional<Tensor>& atol,
const c10::optional<int64_t> atol_bdim, const optional<Tensor>& rtol, const std::optional<int64_t> atol_bdim, const optional<Tensor>& rtol,
const c10::optional<int64_t> rtol_bdim, bool hermitian) { const std::optional<int64_t> rtol_bdim, bool hermitian) {
return atol_rtol_tensor_batch_rule(ATEN_FN2(linalg_pinv, atol_rtol_tensor), input, input_bdim, atol, atol_bdim, rtol, rtol_bdim, hermitian, "linalg.pinv"); return atol_rtol_tensor_batch_rule(ATEN_FN2(linalg_pinv, atol_rtol_tensor), input, input_bdim, atol, atol_bdim, rtol, rtol_bdim, hermitian, "linalg.pinv");
} }
} }
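
batch_dim_if_not_empty above is a small but representative use of std::optional<int64_t> as the "maybe there is a batch dimension" signal threaded through these batch rules: an empty 1-D tensor reports no batch dim, anything else reports dim 0. A self-contained sketch of the same logic, with a hypothetical FakeTensor standing in for at::Tensor:

```
#include <cstdint>
#include <iostream>
#include <optional>

// Hypothetical stand-in for at::Tensor, just enough to mirror the check.
struct FakeTensor {
  int ndim;
  int64_t size0;
  int dim() const { return ndim; }
  int64_t size(int) const { return size0; }
};

// An empty 1-D tensor carries no batch dimension; otherwise the batch
// dimension is reported as dim 0.
std::optional<int64_t> batch_dim_if_not_empty(const FakeTensor& t) {
  if (t.dim() == 1 && t.size(0) == 0) {
    return std::optional<int64_t>();   // disengaged, same as std::nullopt
  }
  return std::optional<int64_t>(0);
}

int main() {
  FakeTensor empty{1, 0};
  FakeTensor batched{3, 8};
  std::cout << std::boolalpha
            << batch_dim_if_not_empty(empty).has_value() << "\n"    // false
            << batch_dim_if_not_empty(batched).has_value() << "\n"; // true
}
```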


@ -123,7 +123,7 @@ static Tensor binary_cross_entropy_plumbing(
static Tensor binary_cross_entropy_backward_plumbing( static Tensor binary_cross_entropy_backward_plumbing(
const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& grad, const Tensor& input, const Tensor& target,
const c10::optional<Tensor>& weight_opt, int64_t reduction) { const std::optional<Tensor>& weight_opt, int64_t reduction) {
auto maybe_layer = maybeCurrentDynamicLayer(); auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "binary_cross_entropy_backward_plumbing"); vmap_check_escaped(maybe_layer, "binary_cross_entropy_backward_plumbing");
int64_t cur_level = maybe_layer->layerId(); int64_t cur_level = maybe_layer->layerId();


@ -45,10 +45,10 @@ template<typename F, F Func>
std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>>
batch_norm_batch_rule( batch_norm_batch_rule(
const Tensor& input, optional<int64_t> input_bdim, const Tensor& input, optional<int64_t> input_bdim,
const c10::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim, const std::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim, const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim, const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim, const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
bool training, double momentum, double eps) { bool training, double momentum, double eps) {
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned; const Tensor& weight = *weight_maybe_owned;
@ -63,7 +63,7 @@ batch_norm_batch_rule(
"were not batched.\nIf you are using a module and do not need eval mode, please set `track_running_stats` to be False.", "were not batched.\nIf you are using a module and do not need eval mode, please set `track_running_stats` to be False.",
"If you are using a prebuilt module and do not need eval mode, please see the functorch website for resources on ", "If you are using a prebuilt module and do not need eval mode, please see the functorch website for resources on ",
"how to patch your module to work with vmap"); "how to patch your module to work with vmap");
c10::optional<int64_t> bdim_size; std::optional<int64_t> bdim_size;
Tensor result0; Tensor result0;
Tensor mean; Tensor mean;
Tensor rstd; Tensor rstd;
@ -80,8 +80,8 @@ batch_norm_batch_rule(
input_ = ensure_has_bdim(input_, input_bdim.has_value(), bdim_size.value()); input_ = ensure_has_bdim(input_, input_bdim.has_value(), bdim_size.value());
input_ = reshape_dim_into(0, /*channels dim*/1, input_); input_ = reshape_dim_into(0, /*channels dim*/1, input_);
c10::optional<Tensor> running_mean_; std::optional<Tensor> running_mean_;
c10::optional<Tensor> running_var_; std::optional<Tensor> running_var_;
if (running_mean.defined()) { if (running_mean.defined()) {
running_mean_ = moveBatchDimToFront(running_mean, running_mean_bdim); running_mean_ = moveBatchDimToFront(running_mean, running_mean_bdim);
running_mean_ = ensure_has_bdim(*running_mean_, running_mean_bdim.has_value(), bdim_size.value()); running_mean_ = ensure_has_bdim(*running_mean_, running_mean_bdim.has_value(), bdim_size.value());
@ -127,8 +127,8 @@ template<typename F, F Func>
std::tuple<at::Tensor,optional<int64_t>> batch_norm_backward_no_weight_bias_batch_rule( std::tuple<at::Tensor,optional<int64_t>> batch_norm_backward_no_weight_bias_batch_rule(
const at::Tensor & grad_out, optional<int64_t> grad_out_bdim, const at::Tensor & grad_out, optional<int64_t> grad_out_bdim,
const at::Tensor & input, optional<int64_t> input_bdim, const at::Tensor & input, optional<int64_t> input_bdim,
const c10::optional<at::Tensor> & running_mean_opt, optional<int64_t> running_mean_bdim, const std::optional<at::Tensor> & running_mean_opt, optional<int64_t> running_mean_bdim,
const c10::optional<at::Tensor> & running_var_opt, optional<int64_t> running_var_bdim, const std::optional<at::Tensor> & running_var_opt, optional<int64_t> running_var_bdim,
const at::Tensor & mean, optional<int64_t> mean_bdim, const at::Tensor & mean, optional<int64_t> mean_bdim,
const at::Tensor & rstd, optional<int64_t> rstd_bdim, const at::Tensor & rstd, optional<int64_t> rstd_bdim,
bool training, double eps) { bool training, double eps) {
@ -199,11 +199,11 @@ template<typename F, F Func>
std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing( std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing(
const at::Tensor & grad_out, const at::Tensor & grad_out,
const at::Tensor & input, const at::Tensor & input,
const c10::optional<at::Tensor> & weight_opt, const std::optional<at::Tensor> & weight_opt,
const c10::optional<at::Tensor> & running_mean_opt, const std::optional<at::Tensor> & running_mean_opt,
const c10::optional<at::Tensor> & running_var_opt, const std::optional<at::Tensor> & running_var_opt,
const c10::optional<at::Tensor> & save_mean_opt, const std::optional<at::Tensor> & save_mean_opt,
const c10::optional<at::Tensor> & save_rstd_opt, const std::optional<at::Tensor> & save_rstd_opt,
bool training, bool training,
double eps, double eps,
std::array<bool,3> output_mask) { std::array<bool,3> output_mask) {
@ -284,8 +284,8 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing(
} }
static std::tuple<Tensor,Tensor,Tensor> native_group_norm_plumbing( static std::tuple<Tensor,Tensor,Tensor> native_group_norm_plumbing(
const Tensor & input, const c10::optional<Tensor> & weight_opt, const Tensor & input, const std::optional<Tensor> & weight_opt,
const c10::optional<Tensor> & bias_opt, int64_t N, int64_t C, const std::optional<Tensor> & bias_opt, int64_t N, int64_t C,
int64_t HxW, int64_t group, double eps) { int64_t HxW, int64_t group, double eps) {
// See [Note: hacky wrapper removal for optional tensor] // See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -372,7 +372,7 @@ static std::tuple<at::Tensor,optional<int64_t>> group_norm_backward_no_weight_bi
static std::tuple<Tensor,Tensor,Tensor> native_group_norm_backward_plumbing( static std::tuple<Tensor,Tensor,Tensor> native_group_norm_backward_plumbing(
const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & grad_out, const Tensor & input, const Tensor & mean,
const Tensor & rstd, const c10::optional<Tensor> & weight_opt, const Tensor & rstd, const std::optional<Tensor> & weight_opt,
int64_t N, int64_t C, int64_t HxW, int64_t group, std::array<bool,3> output_mask int64_t N, int64_t C, int64_t HxW, int64_t group, std::array<bool,3> output_mask
) { ) {
// See [Note: hacky wrapper removal for optional tensor] // See [Note: hacky wrapper removal for optional tensor]
@ -488,8 +488,8 @@ static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optio
native_layer_norm_batch_rule( native_layer_norm_batch_rule(
const Tensor& input, optional<int64_t> input_bdim, const Tensor& input, optional<int64_t> input_bdim,
c10::SymIntArrayRef normalized_shape, c10::SymIntArrayRef normalized_shape,
const c10::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim, const std::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim, const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
double eps) { double eps) {
auto input_ = moveBatchDimToFront(input, input_bdim); auto input_ = moveBatchDimToFront(input, input_bdim);
if (!weight_bdim && !bias_bdim) { if (!weight_bdim && !bias_bdim) {
@ -573,8 +573,8 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_p
at::IntArrayRef normalized_shape, at::IntArrayRef normalized_shape,
const at::Tensor & mean, const at::Tensor & mean,
const at::Tensor & rstd, const at::Tensor & rstd,
const c10::optional<at::Tensor> & weight_opt, const std::optional<at::Tensor> & weight_opt,
const c10::optional<at::Tensor> & bias_opt, const std::optional<at::Tensor> & bias_opt,
std::array<bool,3> output_mask) { std::array<bool,3> output_mask) {
// See [Note: hacky wrapper removal for optional tensor] // See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -653,10 +653,10 @@ template <typename F, F Func>
struct NativeBatchNormBatchRuleHelper { struct NativeBatchNormBatchRuleHelper {
static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply( static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply(
const Tensor& input, optional<int64_t> input_bdim, const Tensor& input, optional<int64_t> input_bdim,
const c10::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim, const std::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim, const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim, const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim, const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
bool training, double momentum, double eps) { bool training, double momentum, double eps) {
return batch_norm_batch_rule<F, Func>( return batch_norm_batch_rule<F, Func>(
input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim, input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim,
@ -669,9 +669,9 @@ struct CudnnBatchNormBatchRuleHelper {
static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply( static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply(
const Tensor& input, optional<int64_t> input_bdim, const Tensor& input, optional<int64_t> input_bdim,
const Tensor& weight_opt, optional<int64_t> weight_bdim, const Tensor& weight_opt, optional<int64_t> weight_bdim,
const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim, const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim, const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim, const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
bool training, double momentum, double eps) { bool training, double momentum, double eps) {
auto reserve = at::empty({0}, input.options().dtype(kByte)); // in experiments, reserve was never set to anything other than empty by cuda auto reserve = at::empty({0}, input.options().dtype(kByte)); // in experiments, reserve was never set to anything other than empty by cuda
auto res = batch_norm_batch_rule<F, Func>( auto res = batch_norm_batch_rule<F, Func>(
@ -686,9 +686,9 @@ struct MiopenBatchNormBatchRuleHelper {
static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply( static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply(
const Tensor& input, optional<int64_t> input_bdim, const Tensor& input, optional<int64_t> input_bdim,
const Tensor& weight_opt, optional<int64_t> weight_bdim, const Tensor& weight_opt, optional<int64_t> weight_bdim,
const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim, const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim, const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim, const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
bool training, double momentum, double eps) { bool training, double momentum, double eps) {
return batch_norm_batch_rule<F, Func>( return batch_norm_batch_rule<F, Func>(
input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim, input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim,
@ -716,11 +716,11 @@ struct NativeBatchNormBackwardBatchRuleHelper {
static std::tuple<Tensor,Tensor,Tensor> apply( static std::tuple<Tensor,Tensor,Tensor> apply(
const at::Tensor & grad_out, const at::Tensor & grad_out,
const at::Tensor & input, const at::Tensor & input,
const c10::optional<at::Tensor> & weight_opt, const std::optional<at::Tensor> & weight_opt,
const c10::optional<at::Tensor> & running_mean_opt, const std::optional<at::Tensor> & running_mean_opt,
const c10::optional<at::Tensor> & running_var_opt, const std::optional<at::Tensor> & running_var_opt,
const c10::optional<at::Tensor> & save_mean_opt, const std::optional<at::Tensor> & save_mean_opt,
const c10::optional<at::Tensor> & save_rstd_opt, const std::optional<at::Tensor> & save_rstd_opt,
bool training, bool training,
double eps, double eps,
std::array<bool,3> output_mask) { std::array<bool,3> output_mask) {
@ -748,10 +748,10 @@ struct CudnnBatchNormBackwardBatchRuleHelper {
const at::Tensor & input, const at::Tensor & input,
const at::Tensor & grad_out, const at::Tensor & grad_out,
const at::Tensor & weight, const at::Tensor & weight,
const c10::optional<at::Tensor> & running_mean_opt, const std::optional<at::Tensor> & running_mean_opt,
const c10::optional<at::Tensor> & running_var_opt, const std::optional<at::Tensor> & running_var_opt,
const c10::optional<at::Tensor> & save_mean_opt, const std::optional<at::Tensor> & save_mean_opt,
const c10::optional<at::Tensor> & save_rstd_opt, const std::optional<at::Tensor> & save_rstd_opt,
double eps, double eps,
const at::Tensor & reserve) { const at::Tensor & reserve) {
@ -777,10 +777,10 @@ struct MiopenBatchNormBackwardBatchRuleHelper {
const at::Tensor & input, const at::Tensor & input,
const at::Tensor & grad_out, const at::Tensor & grad_out,
const at::Tensor & weight, const at::Tensor & weight,
const c10::optional<at::Tensor> & running_mean_opt, const std::optional<at::Tensor> & running_mean_opt,
const c10::optional<at::Tensor> & running_var_opt, const std::optional<at::Tensor> & running_var_opt,
const c10::optional<at::Tensor> & save_mean_opt, const std::optional<at::Tensor> & save_mean_opt,
const c10::optional<at::Tensor> & save_rstd_opt, const std::optional<at::Tensor> & save_rstd_opt,
double eps) { double eps) {
auto maybe_layer = maybeCurrentDynamicLayer(); auto maybe_layer = maybeCurrentDynamicLayer();
@ -818,10 +818,10 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_wr
const at::Tensor & grad_out, const at::Tensor & grad_out,
const at::Tensor & input, const at::Tensor & input,
const at::Tensor& weight_opt, const at::Tensor& weight_opt,
const c10::optional<at::Tensor> & running_mean_opt, const std::optional<at::Tensor> & running_mean_opt,
const c10::optional<at::Tensor> & running_var_opt, const std::optional<at::Tensor> & running_var_opt,
const c10::optional<at::Tensor> & save_mean_opt, const std::optional<at::Tensor> & save_mean_opt,
const c10::optional<at::Tensor> & save_rstd_opt, const std::optional<at::Tensor> & save_rstd_opt,
bool training, bool training,
double eps, double eps,
std::array<bool,3> output_mask) { std::array<bool,3> output_mask) {
@ -834,10 +834,10 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_w
const at::Tensor & grad_out, const at::Tensor & grad_out,
const at::Tensor & input, const at::Tensor & input,
const at::Tensor& weight_opt, const at::Tensor& weight_opt,
const c10::optional<at::Tensor> & running_mean_opt, const std::optional<at::Tensor> & running_mean_opt,
const c10::optional<at::Tensor> & running_var_opt, const std::optional<at::Tensor> & running_var_opt,
const c10::optional<at::Tensor> & save_mean_opt, const std::optional<at::Tensor> & save_mean_opt,
const c10::optional<at::Tensor> & save_rstd_opt, const std::optional<at::Tensor> & save_rstd_opt,
bool training, bool training,
double eps, double eps,
std::array<bool,3> output_mask) { std::array<bool,3> output_mask) {
@ -850,13 +850,13 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_w
// work with dynamo anyway so we gain some buffer room to do wrong things here. The (reasonable) hope is that we will // work with dynamo anyway so we gain some buffer room to do wrong things here. The (reasonable) hope is that we will
// make native_batch_norm composite implicit within a few weeks and we can fix this before vmap works with dynamo. // make native_batch_norm composite implicit within a few weeks and we can fix this before vmap works with dynamo.
static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_batch( static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_batch(
const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) { Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) {
return at::native_batch_norm(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps); return at::native_batch_norm(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps);
} }
static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_batch( static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_batch(
const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
bool train, double momentum, double eps) { bool train, double momentum, double eps) {
return at::native_batch_norm(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps); return at::native_batch_norm(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps);
} }
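
Several of the norm rules above unwrap a std::optional<Tensor> weight or bias through at::borrow_from_optional_tensor into a c10::MaybeOwned<Tensor> (see the "[Note: hacky wrapper removal for optional tensor]" comments). A simplified sketch of that idiom, with a hypothetical FakeTensor and a plain copy instead of MaybeOwned borrowing:

```
#include <iostream>
#include <optional>

// Hypothetical stand-in for at::Tensor; defined() mirrors the "undefined
// tensor" sentinel used for an absent weight or bias.
struct FakeTensor {
  bool has_data = false;
  bool defined() const { return has_data; }
};

// An absent std::optional<FakeTensor> becomes an undefined tensor so the rest
// of the kernel has a single code path. The real helper avoids the copy by
// returning a borrowed c10::MaybeOwned<Tensor> when the optional is engaged.
FakeTensor materialize(const std::optional<FakeTensor>& opt) {
  return opt.has_value() ? *opt : FakeTensor{};
}

int main() {
  std::optional<FakeTensor> weight_opt;                   // e.g. affine=False
  std::optional<FakeTensor> bias_opt = FakeTensor{true};
  std::cout << std::boolalpha
            << materialize(weight_opt).defined() << "\n"  // false
            << materialize(bias_opt).defined() << "\n";   // true
}
```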


@ -58,7 +58,7 @@ Tensor& random_inplace_batching_rule(Tensor& self, ExtraArgs... extra_args) {
} }
} }
static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) { static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, std::optional<Generator> gen) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode); c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
auto maybe_layer = maybeCurrentDynamicLayer(); auto maybe_layer = maybeCurrentDynamicLayer();
auto cur_level = maybe_layer->layerId(); auto cur_level = maybe_layer->layerId();
@ -173,7 +173,7 @@ Tensor tensor_like_random_batch_rule(const Tensor& self, ExtraArgs... extra_args
return (randomness == RandomnessType::Same) ? res : makeBatched(res, 0, cur_level); return (randomness == RandomnessType::Same) ? res : makeBatched(res, 0, cur_level);
} }
static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tensor, double p, c10::optional<bool> train) { static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tensor, double p, std::optional<bool> train) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode); c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
auto maybe_layer = maybeCurrentDynamicLayer(); auto maybe_layer = maybeCurrentDynamicLayer();
const auto cur_level = maybe_layer->layerId(); const auto cur_level = maybe_layer->layerId();
@ -213,7 +213,7 @@ static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tens
return std::make_tuple(output, mask); return std::make_tuple(output, mask);
} }
static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const c10::optional<Generator> generator) { static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const std::optional<Generator> generator) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode); c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
auto maybe_layer = maybeCurrentDynamicLayer(); auto maybe_layer = maybeCurrentDynamicLayer();
const auto cur_level = maybe_layer->layerId(); const auto cur_level = maybe_layer->layerId();


@ -169,7 +169,7 @@ void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack
new_dims.push_back(getPhysicalDim(self, self_bdim.has_value(), dim)); new_dims.push_back(getPhysicalDim(self, self_bdim.has_value(), dim));
} }
bool is_scalar_case = logical_dim == 0 && dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0]); bool is_scalar_case = logical_dim == 0 && dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0]);
c10::optional<bool> maybe_keepdim; std::optional<bool> maybe_keepdim;
if (is_scalar_case) { if (is_scalar_case) {
// NOTE: [boxed_reduction_batch_rule scalar tensor handling] // NOTE: [boxed_reduction_batch_rule scalar tensor handling]
// Reduction operations in PyTorch have an edge case where they allow // Reduction operations in PyTorch have an edge case where they allow
@ -321,9 +321,9 @@ static std::tuple<Tensor,optional<int64_t>> searchsorted_batch_rule(
optional<int64_t> self_bdim, optional<int64_t> self_bdim,
bool out_int32, bool out_int32,
bool right, bool right,
c10::optional<c10::string_view> side, std::optional<c10::string_view> side,
const c10::optional<Tensor>& sorter, const std::optional<Tensor>& sorter,
c10::optional<int64_t> sorter_bdim) { std::optional<int64_t> sorter_bdim) {
auto buckets_logical_rank = rankWithoutBatchDim(sorted_sequence, sorted_sequence_bdim); auto buckets_logical_rank = rankWithoutBatchDim(sorted_sequence, sorted_sequence_bdim);
auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
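
boxed_reduction_batch_rule above declares std::optional<bool> maybe_keepdim disengaged before the scalar-tensor special case. A tiny sketch of that declare-empty / set-conditionally / query-later shape; the condition and the value assigned here are invented purely for illustration:

```
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

int main() {
  std::vector<int64_t> dims = {0};
  int64_t logical_dim = 0;

  std::optional<bool> maybe_keepdim;                     // starts disengaged
  bool is_scalar_case = (logical_dim == 0 && dims.size() == 1);
  if (is_scalar_case) {
    maybe_keepdim = true;                                // engaged only on this path
  }

  std::cout << std::boolalpha << maybe_keepdim.has_value() << " "
            << maybe_keepdim.value_or(false) << "\n";
}
```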

Some files were not shown because too many files have changed in this diff.