[codemod] c10::optional -> std::optional in caffe2/aten/src/ATen/DeviceGuard.h +117 (#126901)

Summary:
Generated with
```
fbgs -f '.*\.(cpp|cxx|cc|h|hpp|cu|cuh)$' c10::optional -l | perl -pe 's/^fbsource.fbcode.//' | grep -v executorch | xargs -n 50 perl -pi -e 's/c10::optional/std::optional/g'
```

 - If you approve of this diff, please use the "Accept & Ship" button :-)

(117 files modified.)
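
For context on why this can be a purely textual sweep: at this revision `c10::optional` is expected to be a plain alias of `std::optional` (an assumption about `c10/util/Optional.h`, not something shown in this diff), so rewritten signatures keep accepting `c10::nullopt` and keep interoperating with untouched call sites, as in the hunks below where only parameter types changed while `return ... : c10::nullopt;` lines stayed as-is. A minimal standalone sketch, with the assumed alias declared locally so the example compiles on its own:

```
#include <optional>

// Assumption for this sketch: c10::optional is a plain type alias of
// std::optional (roughly what recent c10/util/Optional.h provides).
// It is declared locally here only so the example is self-contained.
namespace c10 {
template <class T>
using optional = std::optional<T>;             // assumed alias
inline constexpr auto nullopt = std::nullopt;  // assumed re-export
}

// A signature rewritten by the codemod can still hand back c10::nullopt,
// mirroring hunks below where the parameter type changed but the
// `return ... : c10::nullopt;` line was left untouched.
std::optional<int> twice(const std::optional<int>& x) {
  return x.has_value() ? std::optional<int>(*x * 2) : c10::nullopt;
}

int main() {
  // Callers may keep spelling the old name; it names the same type.
  return twice(c10::optional<int>(21)) == std::optional<int>(42) ? 0 : 1;
}
```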

Test Plan: Sandcastle

Reviewed By: palmje

Pull Request resolved: https://github.com/pytorch/pytorch/pull/126901
Approved by: https://github.com/Skylion007, https://github.com/eqy
Richard Barnes authored on 2024-05-24 00:26:15 +00:00; committed by PyTorch MergeBot
commit 3f5b59eef4 (parent 95e5c994f9)
89 changed files with 243 additions and 243 deletions


@ -23,7 +23,7 @@ inline std::optional<Device> device_of(const Tensor& t) {
}
}
inline std::optional<Device> device_of(const c10::optional<Tensor>& t) {
inline std::optional<Device> device_of(const std::optional<Tensor>& t) {
return t.has_value() ? device_of(t.value()) : c10::nullopt;
}


@ -220,7 +220,7 @@ Tensor FunctionalInverses::lift_fresh_inverse(const Tensor& base, const Tensor&
return mutated_view;
}
Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, std::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, std::optional<c10::SymInt> start, std::optional<c10::SymInt> end, c10::SymInt step) {
if (inverse_return_mode == InverseReturnMode::AlwaysView) {
// NB: assumes mutated_view is a narrowed view of base.
// We should NOT do this for functionalization


@ -526,7 +526,7 @@ Tensor to_functional_tensor(const Tensor& tensor) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isFunctionalTensor(tensor));
return at::detail::make_tensor<FunctionalTensorWrapper>(tensor);
}
std::optional<Tensor> to_functional_tensor(const c10::optional<Tensor>& tensor) {
std::optional<Tensor> to_functional_tensor(const std::optional<Tensor>& tensor) {
if (tensor.has_value()) {
return c10::make_optional<Tensor>(to_functional_tensor(*tensor));
}
@ -564,7 +564,7 @@ Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional) {
return tensor;
}
}
std::optional<Tensor> from_functional_tensor(const c10::optional<Tensor>& t, bool assert_functional) {
std::optional<Tensor> from_functional_tensor(const std::optional<Tensor>& t, bool assert_functional) {
if (t.has_value()) {
return c10::make_optional<Tensor>(from_functional_tensor(*t, assert_functional));
}


@ -23,7 +23,7 @@ Tensor& scalar_fill(Tensor& self, const Scalar& value) {
return self;
}
Tensor scalar_tensor_static(const Scalar& s, std::optional<ScalarType> dtype_opt, c10::optional<Device> device_opt) {
Tensor scalar_tensor_static(const Scalar& s, std::optional<ScalarType> dtype_opt, std::optional<Device> device_opt) {
at::tracer::impl::NoTracerDispatchMode tracer_guard;
at::AutoDispatchBelowAutograd mode;
Tensor result = at::detail::empty_cpu(


@ -39,7 +39,7 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
// CppFunction::makeNamedNotSupported() to avoid listing out the types of everything.
// However, registering e.g. CppFunction::makeNamedNotSupported() as an implementation
// only works for operators that support boxing.
#define TENSOROPTIONS std::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>
#define TENSOROPTIONS std::optional<c10::ScalarType>, std::optional<c10::Layout>, std::optional<c10::Device>, std::optional<bool>
// random operations (out-of-place)
m.impl("bernoulli", unsupportedRandomOp<const Tensor&, optional<Generator>>);


@ -712,7 +712,7 @@ class TORCH_API TensorBase {
/// // f requires grad, has no operation creating it
/// @endcode
/// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
/// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, std::optional<TensorList> inputs=c10::nullopt) const;
///
/// Computes the gradient of current tensor with respect to graph leaves.
///


@ -71,7 +71,7 @@ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIn
}
template <>
inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(std::optional<c10::SymInt> x) {
return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt;
}


@ -812,7 +812,7 @@ std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;
void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -846,7 +846,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI
EXPECT_FALSE(called_arg4.has_value());
}
std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -883,8 +883,8 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI
EXPECT_FALSE(called_arg4.has_value());
}
std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::tuple<std::optional<Tensor>, std::optional<int64_t>, std::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}


@ -554,7 +554,7 @@ std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;
void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_
EXPECT_FALSE(called_arg4.has_value());
}
std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -625,8 +625,8 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_
EXPECT_FALSE(called_arg4.has_value());
}
std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::tuple<std::optional<Tensor>, std::optional<int64_t>, std::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}


@ -739,7 +739,7 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
[&] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
[&] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -779,7 +779,7 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
[&] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
[&] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -822,7 +822,7 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
[] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
[] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
});
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});


@ -473,7 +473,7 @@ std::optional<std::string> called_arg4 = c10::nullopt;
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -507,7 +507,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -544,7 +544,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});


@ -684,7 +684,7 @@ std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;
struct KernelWithOptInputWithoutOutput final : OperatorKernel {
void operator()(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
void operator()(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -720,7 +720,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w
}
struct KernelWithOptInputWithOutput final : OperatorKernel {
std::optional<Tensor> operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::optional<Tensor> operator()(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -759,8 +759,8 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w
}
struct KernelWithOptInputWithMultipleOutputs final : OperatorKernel {
std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
operator()(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::tuple<std::optional<Tensor>, std::optional<int64_t>, std::optional<std::string>>
operator()(Tensor arg1, const std::optional<Tensor>& arg2, std::optional<int64_t> arg3, std::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}
};


@ -221,7 +221,7 @@ public:
*/
// NB: steals the inferred function schema, as we may need to hold on to
// it for a bit until the real schema turns up
RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional<DispatchKey> dispatch_key, KernelFunction kernel, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
/**
* Given an operator, tells the Dispatcher that we have implemented a fake impl


@ -53,7 +53,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const {
is_varret());
}
bool FunctionSchema::canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const {
bool FunctionSchema::canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const std::optional<AliasTypeSet> &rhs) const {
if (!lhs || !rhs) {
return false;
}
@ -67,7 +67,7 @@ bool FunctionSchema::canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lh
return false;
}
std::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const {
std::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const std::optional<AliasTypeSet> &aliasTypeSet) const {
if (!aliasTypeSet) {
return c10::nullopt;
}


@ -416,10 +416,10 @@ struct TORCH_API FunctionSchema {
// Returns whether the two AliasTypeSets contain any similarities
// ie: whether the two type sets can alias.
bool canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;
bool canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const std::optional<AliasTypeSet> &rhs) const;
// Recursively Finds all contained types within the AliasTypeSet.
std::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;
std::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const std::optional<AliasTypeSet> &aliasTypeSet) const;
// Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
// Used to map types to a type such that all types that can alias will be mapped to the same type.


@ -1909,7 +1909,7 @@ std::unordered_map<K, V> generic_to(
}
template <typename T>
std::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) {
std::optional<T> generic_to(IValue ivalue, _fake_type<std::optional<T>>) {
if (ivalue.isNone()) {
return c10::nullopt;
}


@ -399,7 +399,7 @@ public:
}
private:
Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
KernelRegistrationConfig config;
config.dispatch_key = dispatch_key;
config.func = std::move(func);


@ -883,54 +883,54 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// optional types (with has_value() == true)
testArgTypes<std::optional<double>>::test(
std::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
std::optional<double>(1.5), [] (const std::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
std::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float? a) -> float?");
testArgTypes<std::optional<int64_t>>::test(
std::optional<int64_t>(1), [] (const c10::optional<int64_t>& v) {EXPECT_EQ(1, v.value());},
std::optional<int64_t>(1), [] (const std::optional<int64_t>& v) {EXPECT_EQ(1, v.value());},
std::optional<int64_t>(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());},
"(int? a) -> int?");
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(true), [] (const c10::optional<bool>& v) {EXPECT_EQ(true, v.value());},
std::optional<bool>(true), [] (const std::optional<bool>& v) {EXPECT_EQ(true, v.value());},
std::optional<bool>(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());},
"(bool? a) -> bool?");
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(false), [] (const c10::optional<bool>& v) {EXPECT_EQ(false, v.value());},
std::optional<bool>(false), [] (const std::optional<bool>& v) {EXPECT_EQ(false, v.value());},
std::optional<bool>(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());},
"(bool? a) -> bool?");
testArgTypes<std::optional<std::string>>::test(
std::optional<std::string>("string1"), [] (const c10::optional<std::string>& v) {EXPECT_EQ("string1", v.value());},
std::optional<std::string>("string1"), [] (const std::optional<std::string>& v) {EXPECT_EQ("string1", v.value());},
std::optional<std::string>("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());},
"(str? a) -> str?");
testArgTypes<std::optional<Tensor>>::test(
std::optional<Tensor>(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional<Tensor>& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));},
std::optional<Tensor>(dummyTensor(c10::DispatchKey::CPU)), [] (const std::optional<Tensor>& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));},
std::optional<Tensor>(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));},
"(Tensor? a) -> Tensor?");
// optional types (with has_value() == false)
testArgTypes<std::optional<double>>::test(
std::optional<double>(c10::nullopt), [] (const c10::optional<double>& v) {EXPECT_FALSE(v.has_value());},
std::optional<double>(c10::nullopt), [] (const std::optional<double>& v) {EXPECT_FALSE(v.has_value());},
std::optional<double>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(float? a) -> float?");
testArgTypes<std::optional<int64_t>>::test(
std::optional<int64_t>(c10::nullopt), [] (const c10::optional<int64_t>& v) {EXPECT_FALSE(v.has_value());},
std::optional<int64_t>(c10::nullopt), [] (const std::optional<int64_t>& v) {EXPECT_FALSE(v.has_value());},
std::optional<int64_t>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(int? a) -> int?");
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
std::optional<bool>(c10::nullopt), [] (const std::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
std::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(bool? a) -> bool?");
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
std::optional<bool>(c10::nullopt), [] (const std::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
std::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(bool? a) -> bool?");
testArgTypes<std::optional<std::string>>::test(
std::optional<std::string>(c10::nullopt), [] (const c10::optional<std::string>& v) {EXPECT_FALSE(v.has_value());},
std::optional<std::string>(c10::nullopt), [] (const std::optional<std::string>& v) {EXPECT_FALSE(v.has_value());},
std::optional<std::string>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(str? a) -> str?");
testArgTypes<std::optional<Tensor>>::test(
std::optional<Tensor>(c10::nullopt), [] (const c10::optional<Tensor>& v) {EXPECT_FALSE(v.has_value());},
std::optional<Tensor>(c10::nullopt), [] (const std::optional<Tensor>& v) {EXPECT_FALSE(v.has_value());},
std::optional<Tensor>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(Tensor? a) -> Tensor?");
@ -1137,19 +1137,19 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// Test optional of list (with nullopt)
testArgTypes<std::optional<c10::List<int64_t>>>::test(
std::optional<c10::List<int64_t>>(c10::nullopt), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_FALSE(v.has_value());},
std::optional<c10::List<int64_t>>(c10::nullopt), [] (const std::optional<c10::List<int64_t>>& v) {EXPECT_FALSE(v.has_value());},
std::optional<c10::List<int64_t>>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(int[]? a) -> int[]?");
// Test optional of list (with empty list)
testArgTypes<std::optional<c10::List<int64_t>>>::test(
std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_EQ(0, v.value().size());},
std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const std::optional<c10::List<int64_t>>& v) {EXPECT_EQ(0, v.value().size());},
std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<int64_t>>().size());},
"(int[]? a) -> int[]?");
// Test optional of list (with values)
testArgTypes<std::optional<c10::List<int64_t>>>::test(
std::optional<c10::List<int64_t>>(c10::List<int64_t>({1, 2})), [] (const c10::optional<c10::List<int64_t>>& v) {expectListEquals({1, 2}, v.value());},
std::optional<c10::List<int64_t>>(c10::List<int64_t>({1, 2})), [] (const std::optional<c10::List<int64_t>>& v) {expectListEquals({1, 2}, v.value());},
std::optional<c10::List<int64_t>>(c10::List<int64_t>({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to<c10::List<int64_t>>());},
"(int[]? a) -> int[]?");


@ -426,8 +426,8 @@ std::optional<int64_t> batch_dim_if_not_empty(const Tensor& t) {
}
fourOutputs linalg_lstsq_batch_rule(
const Tensor& self, std::optional<int64_t> self_bdim, const Tensor& b, c10::optional<int64_t> b_bdim,
std::optional<double> rcond, c10::optional<c10::string_view> driver) {
const Tensor& self, std::optional<int64_t> self_bdim, const Tensor& b, std::optional<int64_t> b_bdim,
std::optional<double> rcond, std::optional<c10::string_view> driver) {
TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.linalg.lstsq: input must have at least 2 dimensions.");
TORCH_CHECK(rankWithoutBatchDim(b, b_bdim) >= 1, "torch.linalg.lstsq: other must have at least 1 dimension.");


@ -850,13 +850,13 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_w
// work with dynamo anyway so we gain some buffer room to do wrong things here. The (reasonable) hope is that we will
// make native_batch_norm composite implicit within a few weeks and we can fix this before vmap works with dynamo.
static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_batch(
const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) {
return at::native_batch_norm(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps);
}
static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_batch(
const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
bool train, double momentum, double eps) {
return at::native_batch_norm(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps);
}


@ -60,7 +60,7 @@ Tensor _functional_sym_constrain_range(
return dep_token.clone();
}
void sym_constrain_range_for_size(const Scalar& size, std::optional<int64_t> min, c10::optional<int64_t> max) {
void sym_constrain_range_for_size(const Scalar& size, std::optional<int64_t> min, std::optional<int64_t> max) {
int64_t min_val = min.has_value() ? min.value() : 0;
if (max.has_value() && max.value() <= 2) {
TORCH_CHECK(false, "Max value to constrain_range_for_size must be greater than 2. got: ", max.value());


@ -1730,7 +1730,7 @@ static Tensor subvariable(const Tensor& var, int dim, int groups, int g) {
return result;
}
std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward( const std::optional<Tensor>& ggI_opt, const c10::optional<Tensor>& ggW_r_opt, const c10::optional<Tensor>& ggb_opt,
std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward( const std::optional<Tensor>& ggI_opt, const std::optional<Tensor>& ggW_r_opt, const std::optional<Tensor>& ggb_opt,
const Tensor& gO_r, const Tensor& weight_r, const Tensor& input,
IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_,
bool transposed_, IntArrayRef output_padding_, int64_t groups_,


@ -132,7 +132,7 @@ static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMet
}
template<template<typename> class random_from_to_kernel, typename RNG>
at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, std::optional<int64_t> to_opt, c10::optional<Generator> generator) {
at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, std::optional<int64_t> to_opt, std::optional<Generator> generator) {
uint64_t range = 0;
auto iter = at::TensorIterator::borrowing_nullary_op(self);
if (to_opt.has_value()) {


@ -24,8 +24,8 @@ void _fused_adagrad_kernel_cpu_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf) {
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf) {
const float* grad_scale_ptr =
grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
const float* found_inf_ptr =


@ -78,7 +78,7 @@ inline torch::List<std::optional<Tensor>> toListOfOptionalTensors(ArrayRef<IValu
torch::List<std::optional<Tensor>> result;
result.reserve(list.size());
for (const IValue& a : list) {
result.push_back(a.isTensor() ? std::optional<Tensor>(a.toTensor()) : c10::optional<Tensor>());
result.push_back(a.isTensor() ? std::optional<Tensor>(a.toTensor()) : std::optional<Tensor>());
}
return result;
}


@ -359,7 +359,7 @@ Tensor& binary_cross_entropy_backward_out_cpu(const Tensor& grad, const Tensor&
return grad_input;
}
Tensor binary_cross_entropy_with_logits(const Tensor& input, const Tensor& target, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& pos_weight_opt, int64_t reduction) {
Tensor binary_cross_entropy_with_logits(const Tensor& input, const Tensor& target, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& pos_weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;


@ -9,7 +9,7 @@ namespace at::native {
// In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation.
TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape);
TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length);
TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, std::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt, c10::optional<bool> is_coalesced=c10::nullopt);
TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, std::optional<at::ScalarType> dtype=c10::nullopt, std::optional<at::Layout> layout=c10::nullopt, std::optional<at::Device> device=c10::nullopt, std::optional<bool> pin_memory=c10::nullopt, std::optional<bool> is_coalesced=c10::nullopt);
TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const std::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const std::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
// The below ops don't get a duplicated C++ implementation.


@ -538,7 +538,7 @@ BatchNormBackend _select_batch_norm_backend(
// XXX: The indices of backends need to be kept synchronized between this function and its _backward.
// TODO: remove cudnn_enabled arg
std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
const Tensor& input, const std::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */,
const Tensor& input, const std::optional<Tensor>& weight_opt /* optional */, const std::optional<Tensor>& bias_opt /* optional */, const std::optional<Tensor>& running_mean_opt /* optional */, const std::optional<Tensor>& running_var_opt /* optional */,
bool training, double momentum, double eps, bool cudnn_enabled) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -620,7 +620,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
std::tuple<Tensor, Tensor, Tensor> _batch_norm_impl_index_backward(
int64_t impl_index,
const Tensor& input, const Tensor& grad_output, const std::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, const c10::optional<Tensor>& save_mean_opt /* optional */, const c10::optional<Tensor>& save_var_transform_opt /* optional */,
const Tensor& input, const Tensor& grad_output, const std::optional<Tensor>& weight_opt /* optional */, const std::optional<Tensor>& running_mean_opt /* optional */, const std::optional<Tensor>& running_var_opt /* optional */, const std::optional<Tensor>& save_mean_opt /* optional */, const std::optional<Tensor>& save_var_transform_opt /* optional */,
bool train, double epsilon, std::array<bool, 3> output_mask, const Tensor &reservedSpace) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -666,8 +666,8 @@ std::tuple<Tensor, Tensor, Tensor> _batch_norm_impl_index_backward(
// TODO: remove cudnn_enabled arg
Tensor batch_norm(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool training, double momentum, double eps, bool cudnn_enabled) {
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
@ -702,7 +702,7 @@ Tensor batch_norm(
}
Tensor instance_norm(
const Tensor& input, const std::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */,
const Tensor& input, const std::optional<Tensor>& weight_opt /* optional */, const std::optional<Tensor>& bias_opt /* optional */, const std::optional<Tensor>& running_mean_opt /* optional */, const std::optional<Tensor>& running_var_opt /* optional */,
bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -740,7 +740,7 @@ Tensor instance_norm(
}
std::tuple<Tensor, Tensor> batch_norm_update_stats_cpu(
const Tensor& self, const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) {
const Tensor& self, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, double momentum) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
@ -758,7 +758,7 @@ std::tuple<Tensor, Tensor> batch_norm_update_stats_cpu(
});
}
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cpu_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cpu_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -801,7 +801,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cpu_out(const Tensor& self, con
return std::tuple<Tensor& ,Tensor&, Tensor&>(out, save_mean, save_var);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool train, double momentum, double eps) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -851,7 +851,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu(const Tensor& self, const std:
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_cpu(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, double momentum, double eps) {
Tensor output, save_mean, save_var;
std::tie(output, save_mean, save_var) =
@ -861,7 +861,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_cpu(
}
std::tuple<Tensor&, Tensor&, Tensor&, Tensor&> _batch_norm_with_update_cpu_out(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, double momentum, double eps,
Tensor& out, Tensor& save_mean, Tensor& save_var, Tensor& reserve) {
std::tie(out, save_mean, save_var) =
@ -871,8 +871,8 @@ std::tuple<Tensor&, Tensor&, Tensor&, Tensor&> _batch_norm_with_update_cpu_out(
std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_no_update(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
double momentum, double eps) {
const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();});
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
@ -884,41 +884,41 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_no_update(
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_cpu(
const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) {
return batch_norm_cpu(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_no_stats_cpu(
const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
bool train, double momentum, double eps) {
return batch_norm_cpu(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_no_training(
const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
const Tensor& running_mean, const Tensor& running_var, double momentum, double eps) {
return at::_native_batch_norm_legit(self, weight_opt, bias_opt, const_cast<Tensor&>(running_mean), const_cast<Tensor&>(running_var), /*train=*/false, momentum, eps);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_cpu_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) {
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_cpu_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) {
return batch_norm_cpu_out(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps, out, save_mean, save_var);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_no_stats_cpu_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) {
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_no_stats_cpu_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) {
return batch_norm_cpu_out(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps, out, save_mean, save_var);
}
std::tuple<Tensor, Tensor, Tensor> _new_batch_norm_backward_cpu(
const Tensor& grad_output, const Tensor& input, const Tensor& weight,
const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_var_opt,
bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {
return batch_norm_backward_cpu(grad_output, input, weight, running_mean_opt, running_var_opt, save_mean_opt, save_var_opt, update, eps, grad_input_mask);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cpu(const Tensor& grad_out, const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt,
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cpu(const Tensor& grad_out, const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_invstd_opt,
bool train, double eps, std::array<bool,3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);


@ -1163,7 +1163,7 @@ bool _use_cudnn_rnn_flatten_weight() {
// NB: This a (composite) wrapper for _thnn_fused_lstm_cell_backward_impl.
// It duplicates the outputs of this function so the non-composite version doesn't have to.
// The point is so that we avoid triggering TensorImpl use count asserts in debug mode
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backward( const std::optional<Tensor>& grad_hy_opt, const c10::optional<Tensor>& grad_cy_opt,
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backward( const std::optional<Tensor>& grad_hy_opt, const std::optional<Tensor>& grad_cy_opt,
const Tensor& cx, const Tensor& cy,
const Tensor& workspace, bool has_bias) {
TORCH_INTERNAL_ASSERT(!GradMode::is_enabled());
@ -1523,7 +1523,7 @@ std::tuple<Tensor, Tensor, Tensor> lstm(
std::tuple<Tensor, Tensor> lstm_cell(
const Tensor& input, TensorList hx,
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const c10::optional<Tensor>& b_hh_opt) {
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const std::optional<Tensor>& b_hh_opt) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt);
const Tensor& b_ih = *b_ih_maybe_owned;
@ -1539,9 +1539,9 @@ std::tuple<Tensor, Tensor> lstm_cell(
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor>
_thnn_differentiable_lstm_cell_backward( const std::optional<Tensor>& grad_hy_opt, const c10::optional<Tensor>& grad_cy_opt,
_thnn_differentiable_lstm_cell_backward( const std::optional<Tensor>& grad_hy_opt, const std::optional<Tensor>& grad_cy_opt,
const Tensor& input_gates,
const Tensor& hidden_gates, const std::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt,
const Tensor& hidden_gates, const std::optional<Tensor>& input_bias_opt, const std::optional<Tensor>& hidden_bias_opt,
const Tensor& cx,
const Tensor& cy) {
// See [Note: hacky wrapper removal for optional tensor]
@ -1597,7 +1597,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_differentiable_gru_cell
const Tensor& grad_hy,
const Tensor& input_gates,
const Tensor& hidden_gates,
const Tensor& hx, const std::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt){
const Tensor& hx, const std::optional<Tensor>& input_bias_opt, const std::optional<Tensor>& hidden_bias_opt){
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt);
const Tensor& input_bias = *input_bias_maybe_owned;
@ -1637,7 +1637,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_differentiable_gru_cell
Tensor gru_cell(
const Tensor& input, const Tensor& hx,
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const c10::optional<Tensor>& b_hh_opt) {
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const std::optional<Tensor>& b_hh_opt) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt);
const Tensor& b_ih = *b_ih_maybe_owned;
@ -1651,7 +1651,7 @@ Tensor gru_cell(
Tensor rnn_tanh_cell(
const Tensor& input, const Tensor& hx,
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const c10::optional<Tensor>& b_hh_opt) {
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const std::optional<Tensor>& b_hh_opt) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt);
const Tensor& b_ih = *b_ih_maybe_owned;
@ -1665,7 +1665,7 @@ Tensor rnn_tanh_cell(
Tensor rnn_relu_cell(
const Tensor& input, const Tensor& hx,
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const c10::optional<Tensor>& b_hh_opt) {
const Tensor& w_ih, const Tensor& w_hh, const std::optional<Tensor>& b_ih_opt, const std::optional<Tensor>& b_hh_opt) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt);
const Tensor& b_ih = *b_ih_maybe_owned;


@ -869,7 +869,7 @@ Tensor cummaxmin_backward(const Tensor& grad, const Tensor& input, const Tensor&
return result.scatter_add_(dim, indices, grad);
}
static Tensor prepend_append_on_dim(const Tensor& self, const std::optional<Tensor>& prepend, const c10::optional<Tensor>& append, int64_t dim) {
static Tensor prepend_append_on_dim(const Tensor& self, const std::optional<Tensor>& prepend, const std::optional<Tensor>& append, int64_t dim) {
// Helper for diff that handles prepending and appending when at least one is present
TORCH_INTERNAL_ASSERT(prepend.has_value() || append.has_value(), "either prepend or append must be have value");
if (!prepend.has_value() && append.has_value()) {
@ -902,7 +902,7 @@ static inline void diff_check_compatible_shape(const Tensor& self, const std::op
}
}
static inline void diff_check(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>&prepend, const c10::optional<Tensor>& append) {
static inline void diff_check(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>&prepend, const std::optional<Tensor>& append) {
// Helper for diff that checks whether its parameters are valid
TORCH_CHECK(
self.dim() >= 1,
@ -943,7 +943,7 @@ static inline Tensor diff_helper(const Tensor& self, int64_t n, int64_t dim) {
return result;
}
Tensor diff(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>& prepend, const c10::optional<Tensor>& append) {
Tensor diff(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>& prepend, const std::optional<Tensor>& append) {
diff_check(self, n, dim, prepend, append);
if ((!prepend.has_value() && !append.has_value()) || n == 0) {
return diff_helper(self, n, dim);
@ -987,7 +987,7 @@ static inline Tensor& diff_out_helper(const Tensor& self, int64_t n, int64_t dim
return result;
}
Tensor& diff_out(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>& prepend, const c10::optional<Tensor>& append, Tensor& result) {
Tensor& diff_out(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>& prepend, const std::optional<Tensor>& append, Tensor& result) {
diff_check(self, n, dim, prepend, append);
if ((!prepend.has_value() && !append.has_value()) || n == 0) {
return diff_out_helper(self, n, dim, result);
@ -1146,7 +1146,7 @@ std::vector<Tensor> gradient(const Tensor& self, const Scalar& unit_size, IntArr
return gradient_helper_float(self, spacing, dim, edge_order);
}
std::vector<Tensor> gradient(const Tensor& self, const std::optional<Scalar>& unit_size, c10::optional<int64_t> dim, int64_t edge_order) {
std::vector<Tensor> gradient(const Tensor& self, const std::optional<Scalar>& unit_size, std::optional<int64_t> dim, int64_t edge_order) {
const auto processed_dim = gradient_dim_preprocess(self, dim);
// When unit_size not provided, it is always assumed to be equal to 1.
// When dim has integer value it implies we are looking for gradient in the specific direction, however when


@ -587,7 +587,7 @@ Tensor log_softmax(const Tensor& self, Dimname dim, optional<ScalarType> dtype)
return at::log_softmax(self, dimname_to_position(self, dim), dtype);
}
Tensor masked_softmax_cpu(const Tensor& input_, const Tensor& mask_, const std::optional<int64_t> dim_, const c10::optional<int64_t> mask_type_) {
Tensor masked_softmax_cpu(const Tensor& input_, const Tensor& mask_, const std::optional<int64_t> dim_, const std::optional<int64_t> mask_type_) {
auto mask = mask_.contiguous();
auto mask_type = mask_type_; // Mask type might get transformed below


@ -754,27 +754,27 @@ TORCH_IMPL_FUNC(clamp_min_Tensor_out)
}
// Implements the "clip" alias for clamp
Tensor& clip_out(const Tensor& self, const std::optional<Scalar>& min, const c10::optional<Scalar>& max, Tensor& result) {
Tensor& clip_out(const Tensor& self, const std::optional<Scalar>& min, const std::optional<Scalar>& max, Tensor& result) {
return at::clamp_outf(self, min, max, result);
}
Tensor& clip_out(const Tensor& self, const std::optional<Tensor>& min, const c10::optional<Tensor>& max, Tensor& result) {
Tensor& clip_out(const Tensor& self, const std::optional<Tensor>& min, const std::optional<Tensor>& max, Tensor& result) {
return at::clamp_outf(self, min, max, result);
}
Tensor clip(const Tensor& self, const std::optional<Scalar>& min, const c10::optional<Scalar>& max) {
Tensor clip(const Tensor& self, const std::optional<Scalar>& min, const std::optional<Scalar>& max) {
return at::clamp(self, min, max);
}
Tensor clip(const Tensor& self, const std::optional<Tensor>& min, const c10::optional<Tensor>& max) {
Tensor clip(const Tensor& self, const std::optional<Tensor>& min, const std::optional<Tensor>& max) {
return at::clamp(self, min, max);
}
Tensor& clip_(Tensor& self, const std::optional<Scalar>& min, const c10::optional<Scalar>& max) {
Tensor& clip_(Tensor& self, const std::optional<Scalar>& min, const std::optional<Scalar>& max) {
return at::clamp_(self, min, max);
}
Tensor& clip_(Tensor& self, const std::optional<Tensor>& min, const c10::optional<Tensor>& max) {
Tensor& clip_(Tensor& self, const std::optional<Tensor>& min, const std::optional<Tensor>& max) {
return at::clamp_(self, min, max);
}


@ -598,7 +598,7 @@ Tensor to_mkldnn_backward(const Tensor& grad, const Tensor& input_) {
return grad.to_dense(input_.scalar_type());
}
Tensor to_dense(const Tensor& tensor, std::optional<c10::ScalarType> dtype, c10::optional<bool> masked_grad) {
Tensor to_dense(const Tensor& tensor, std::optional<c10::ScalarType> dtype, std::optional<bool> masked_grad) {
if (tensor.layout() == c10::kSparse) {
return tensor._to_dense(dtype, masked_grad);
}
@ -621,7 +621,7 @@ Tensor to_dense(const Tensor& tensor, std::optional<c10::ScalarType> dtype, c10:
return tensor;
}
Tensor sparse_to_dense(const Tensor& self, std::optional<ScalarType> dtype, c10::optional<bool> masked) {
Tensor sparse_to_dense(const Tensor& self, std::optional<ScalarType> dtype, std::optional<bool> masked) {
TORCH_CHECK(
!dtype.has_value(), "dtype argument is not supported by sparse_to_dense");
Tensor dst = at::zeros(self.sizes(), self.options().layout(kStrided));
@ -954,7 +954,7 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self,
}
static inline
void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt) {
void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt) {
auto layout_from = self.layout();
auto layout_to = layout.value_or(kSparse);
@ -1109,7 +1109,7 @@ static Tensor dense_to_sparse_compressed(const Tensor& self, const Tensor& self_
self.options().layout(target_layout));
}
Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt) {
Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt) {
auto layout_to = layout.value_or(kSparse);
TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "dense_to_sparse: unexpected same input and output layout");
TORCH_INTERNAL_ASSERT(self.layout() == mask.layout(),
@ -1165,7 +1165,7 @@ Tensor dense_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, std::optio
return dense_to_sparse_compressed<Layout::SparseBsc>(self, self != 0, blocksize, dense_dim_opt);
}
Tensor dense_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt) {
Tensor dense_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt) {
auto layout_to = layout.value_or(kSparse);
TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "dense_to_sparse: unexpected same input and output layout");
_to_sparse_check_arguments("dense_to_sparse", self, layout, blocksize, dense_dim_opt);
@ -1909,7 +1909,7 @@ Tensor sparse_compressed_to_sparse(const Tensor& self, const int64_t sparse_dim)
return at::native::_sparse_coo_tensor_unsafe(indices, values, self.sizes())._coalesced_(coalesced);
}
Tensor sparse_compressed_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt) {
Tensor sparse_compressed_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt) {
auto layout_to = layout.value_or(kSparse);
TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_compressed_to_sparse: unexpected same input and output layout");
_to_sparse_check_arguments("sparse_compressed_to_sparse", self, layout_to, blocksize, dense_dim_opt);
@ -1936,7 +1936,7 @@ Tensor sparse_compressed_to_sparse(const Tensor& self, std::optional<c10::Layout
return Tensor{};
}
Tensor sparse_coo_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt) {
Tensor sparse_coo_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt) {
auto layout_to = layout.value_or(kSparse);
TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_coo_to_sparse: unexpected same input and output layout");
_to_sparse_check_arguments("sparse_coo_to_sparse", self, layout_to, blocksize, dense_dim_opt);
@ -1969,7 +1969,7 @@ Tensor to_sparse(const Tensor& self, const int64_t sparse_dim) {
return self._to_sparse(sparse_dim);
}
Tensor to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt) {
Tensor to_sparse(const Tensor& self, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt) {
auto layout_to = layout.value_or(kSparse);
if (self.layout() == layout_to) {
_to_sparse_check_arguments("to_sparse", self, layout, blocksize, dense_dim_opt);
@ -2026,7 +2026,7 @@ Tensor to_meta(const Tensor& tensor) {
}
return out;
}
std::optional<Tensor> to_meta(const c10::optional<Tensor>& tensor) {
std::optional<Tensor> to_meta(const std::optional<Tensor>& tensor) {
if (tensor.has_value()) {
return to_meta(*tensor);
}

View File

@ -18,9 +18,9 @@ bool to_will_alias(
std::optional<c10::MemoryFormat> optional_memory_format);
Tensor to_meta(const Tensor& tensor);
std::optional<Tensor> to_meta(const c10::optional<Tensor>& tensor);
std::optional<Tensor> to_meta(const std::optional<Tensor>& tensor);
std::vector<Tensor> to_meta(at::ITensorListRef t_list);
Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim_opt);
Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional<c10::Layout> layout, OptionalIntArrayRef blocksize, std::optional<int64_t> dense_dim_opt);
} // namespace native
} // namespace at

View File

@ -252,8 +252,8 @@ Tensor polar(const Tensor& abs, const Tensor& angle) {
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ empty ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor empty_cpu(IntArrayRef size, std::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt,
std::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
Tensor empty_cpu(IntArrayRef size, std::optional<ScalarType> dtype_opt, std::optional<Layout> layout_opt,
std::optional<Device> device_opt, std::optional<bool> pin_memory_opt, std::optional<c10::MemoryFormat> memory_format_opt) {
Tensor result = at::detail::empty_cpu(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
// See Note [Enabling Deterministic Operations]
if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) {
@ -286,7 +286,7 @@ Tensor empty_names(
}
Tensor empty_permuted_symint(SymIntArrayRef size, IntArrayRef physical_layout, std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt
std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt
) {
// size is logical; aka, the output size you'll get from the operation overall
//
@ -325,7 +325,7 @@ Tensor empty_permuted_symint(SymIntArrayRef size, IntArrayRef physical_layout, s
}
Tensor empty_strided_cpu(IntArrayRef size, IntArrayRef stride, std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt) {
Tensor result = at::detail::empty_strided_cpu(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
// See Note [Enabling Deterministic Operations]
if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) {
@ -1186,7 +1186,7 @@ Tensor range(
Tensor tril_indices_cpu(
int64_t row, int64_t col, int64_t offset, std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt) {
if (!dtype_opt.has_value()) {
dtype_opt = ScalarType::Long;
}
@ -1236,7 +1236,7 @@ Tensor tril_indices_cpu(
Tensor triu_indices_cpu(
int64_t row, int64_t col, int64_t offset, std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt) {
if (!dtype_opt.has_value()) {
dtype_opt = ScalarType::Long;
}
@ -1717,7 +1717,7 @@ Tensor tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options)
return at::detail::tensor_complex_backend(values, options);
}
Tensor from_file(c10::string_view filename, std::optional<bool> shared, c10::optional<int64_t> size,
Tensor from_file(c10::string_view filename, std::optional<bool> shared, std::optional<int64_t> size,
std::optional<ScalarType> dtype,
std::optional<Layout> layout,
std::optional<Device> device,

View File

@ -4013,7 +4013,7 @@ at::Tensor clone_preserve_strides(const at::Tensor& self) {
}
at::Tensor slice_scatter(const at::Tensor& self, const at::Tensor& src, int64_t dim, std::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) {
at::Tensor slice_scatter(const at::Tensor& self, const at::Tensor& src, int64_t dim, std::optional<int64_t> start, std::optional<int64_t> end, int64_t step) {
// See Note [*_scatter ops preserve strides]
auto output = clone_preserve_strides(self);
auto slice = output.slice(dim, start, end, step);

View File

@ -57,7 +57,7 @@ TORCH_API c10::SmallVector<int64_t, 3> compute_output_size(
at::OptionalIntArrayRef output_size,
std::optional<c10::ArrayRef<double>> scale_factors);
inline std::optional<double> get_scale_value(c10::optional<c10::ArrayRef<double>> scales, int idx) {
inline std::optional<double> get_scale_value(std::optional<c10::ArrayRef<double>> scales, int idx) {
if (!scales) {
return c10::nullopt;
}

View File

@ -23,7 +23,7 @@
namespace at::meta {
TORCH_META_FUNC(upsample_bicubic2d) (
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, c10::optional<double> scales_w
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, std::optional<double> scales_w
) {
auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size);
@ -62,7 +62,7 @@ TORCH_META_FUNC(upsample_bicubic2d_backward) (
}
TORCH_META_FUNC(_upsample_bicubic2d_aa) (
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, c10::optional<double> scales_w
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, std::optional<double> scales_w
) {
auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size);

View File

@ -24,7 +24,7 @@
namespace at::meta {
TORCH_META_FUNC(upsample_bilinear2d) (
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, c10::optional<double> scales_w
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, std::optional<double> scales_w
) {
auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size);
@ -63,7 +63,7 @@ TORCH_META_FUNC(upsample_bilinear2d_backward) (
}
TORCH_META_FUNC(_upsample_bilinear2d_aa) (
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, c10::optional<double> scales_w
const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional<double> scales_h, std::optional<double> scales_w
) {
auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size);

View File

@ -22,7 +22,7 @@
namespace at::meta {
TORCH_META_FUNC(upsample_nearest2d) (
const Tensor& input, IntArrayRef output_size, std::optional<double> scales_h, c10::optional<double> scales_w
const Tensor& input, IntArrayRef output_size, std::optional<double> scales_h, std::optional<double> scales_w
) {
auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size);
@ -36,7 +36,7 @@ TORCH_META_FUNC(upsample_nearest2d) (
}
TORCH_META_FUNC(_upsample_nearest_exact2d) (
const Tensor& input, IntArrayRef output_size, std::optional<double> scales_h, c10::optional<double> scales_w
const Tensor& input, IntArrayRef output_size, std::optional<double> scales_h, std::optional<double> scales_w
) {
auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size);

View File

@ -24,7 +24,7 @@
namespace at::native {
void _backward(const Tensor& self, TensorList inputs, const std::optional<Tensor>& gradient_opt, c10::optional<bool> keep_graph, bool create_graph) {
void _backward(const Tensor& self, TensorList inputs, const std::optional<Tensor>& gradient_opt, std::optional<bool> keep_graph, bool create_graph) {
return self._backward(inputs, gradient_opt, keep_graph, create_graph);
}

View File

@ -432,7 +432,7 @@ void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var,
}
}
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined());
const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined());
TORCH_CHECK(has_running_mean == has_running_var);
@ -458,7 +458,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, co
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) {
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self);
int64_t n_input = self.size(1);
auto options = self.options().dtype(
@ -482,7 +482,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const std
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_cuda(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, double momentum, double eps) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -507,7 +507,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_cuda(
}
std::tuple<Tensor&, Tensor&, Tensor&, Tensor&> _batch_norm_with_update_cuda_out(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, double momentum, double eps,
Tensor& out, Tensor& save_mean, Tensor& save_var, Tensor& reserve) {
// See [Note: hacky wrapper removal for optional tensor]
@ -529,26 +529,26 @@ std::tuple<Tensor&, Tensor&, Tensor&, Tensor&> _batch_norm_with_update_cuda_out(
return std::tuple<Tensor&, Tensor&, Tensor&, Tensor&>(out, save_mean, save_var, reserve);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_cuda(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon) {
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_cuda(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon) {
return batch_norm_cuda(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon);
}
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_no_stats_cuda(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon) {
std::tuple<Tensor, Tensor, Tensor> _batch_norm_legit_no_stats_cuda(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon) {
return batch_norm_cuda(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
return batch_norm_cuda_out(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon, output, save_mean, save_invstd);
}
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_no_stats_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
std::tuple<Tensor&, Tensor&, Tensor&> _batch_norm_legit_no_stats_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
return batch_norm_cuda_out(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon, output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> _new_batch_norm_backward_cuda(
const Tensor& grad_output, const Tensor& input, const Tensor& weight,
const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_var_opt,
bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {
const Tensor& dummy_bias = at::empty(1);
const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();});
@ -567,7 +567,7 @@ std::tuple<Tensor, Tensor, Tensor> _new_batch_norm_backward_cuda(
}
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt);
@ -682,7 +682,7 @@ Tensor batch_norm_elemt_cuda(
return output;
}
Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) {
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
@ -690,7 +690,7 @@ Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const std::optional<Tensor
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) {
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
@ -704,7 +704,7 @@ std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, cons
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(
const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) {
const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional<Tensor>& running_mean_opt /* optional */, const std::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;

View File

@ -516,7 +516,7 @@ void gru_backward_impl(const Tensor& grad_hy, const Tensor& workspace,
std::tuple<Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_cuda(
const Tensor& input_gates, const Tensor& hidden_gates,
const Tensor& cx, const std::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt) {
const Tensor& cx, const std::optional<Tensor>& input_bias_opt, const std::optional<Tensor>& hidden_bias_opt) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt);
const Tensor& input_bias = *input_bias_maybe_owned;
@ -564,7 +564,7 @@ void checkLSTMBackwardSizes(const TensorArg& grad_hy, const TensorArg& grad_cy,
checkNumel(c, workspace, exp_size[0] * exp_size[1] * 4);
}
std::tuple<Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backward_impl_cuda( const std::optional<Tensor>& grad_hy_opt, const c10::optional<Tensor>& grad_cy_opt,
std::tuple<Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backward_impl_cuda( const std::optional<Tensor>& grad_hy_opt, const std::optional<Tensor>& grad_cy_opt,
const Tensor& cx, const Tensor& cy,
const Tensor& workspace, bool has_bias) {
// See [Note: hacky wrapper removal for optional tensor]
@ -602,7 +602,7 @@ static constexpr int64_t GRU_WORKSPACE_MULTIPLIER = 5;
std::tuple<Tensor, Tensor> _thnn_fused_gru_cell_cuda(
const Tensor& input_gates, const Tensor& hidden_gates,
const Tensor& hx, const std::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt) {
const Tensor& hx, const std::optional<Tensor>& input_bias_opt, const std::optional<Tensor>& hidden_bias_opt) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt);
const Tensor& input_bias = *input_bias_maybe_owned;

View File

@ -40,7 +40,7 @@ __global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T
// See note [Algorithm of randperm]
template<typename T, typename scalar_t>
void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional<at::Generator> &gen_) {
void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, std::optional<at::Generator> &gen_) {
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(gen_, at::cuda::detail::getDefaultCUDAGenerator());
int64_t counter_offset = n;
at::PhiloxCudaState rng_engine_inputs;

View File

@ -1113,7 +1113,7 @@ TORCH_IMPL_FUNC(softmax_backward_cuda_out)
host_softmax_backward<SoftMaxBackwardEpilogue, false>(tmp, output, dim, half_to_float, grad_input);
}
Tensor masked_softmax_cuda(const Tensor& input_, const Tensor& mask_, const std::optional<int64_t> dim_, const c10::optional<int64_t> mask_type_) {
Tensor masked_softmax_cuda(const Tensor& input_, const Tensor& mask_, const std::optional<int64_t> dim_, const std::optional<int64_t> mask_type_) {
Tensor output = at::empty_like(input_, input_.options());
TORCH_CHECK(mask_.scalar_type() == ScalarType::Bool, "Mask should be a boolean tensor");

View File

@ -51,7 +51,7 @@ Tensor& eye_out_cuda(int64_t n, int64_t m, Tensor& result) {
return result;
}
Tensor empty_cuda(IntArrayRef size, std::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
Tensor empty_cuda(IntArrayRef size, std::optional<ScalarType> dtype_opt, std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt, std::optional<c10::MemoryFormat> memory_format_opt) {
Tensor result = at::detail::empty_cuda(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
// See Note [Enabling Deterministic Operations]
if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) {
@ -77,7 +77,7 @@ Tensor _efficientzerotensor_cuda(IntArrayRef size,
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, std::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, std::optional<ScalarType> dtype_opt, std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt) {
Tensor result = at::detail::empty_strided_cuda(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
// See Note [Enabling Deterministic Operations]
if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) {
@ -275,7 +275,7 @@ void tril_indices_kernel(scalar_t * tensor,
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
@ -351,7 +351,7 @@ void triu_indices_kernel(scalar_t * tensor,
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
std::optional<Layout> layout_opt, std::optional<Device> device_opt, std::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);

View File

@ -17,13 +17,13 @@ namespace upsample {
TORCH_API c10::SmallVector<int64_t, 3> compute_output_size(
c10::IntArrayRef input_size, // Full input tensor size.
at::OptionalIntArrayRef output_size,
c10::optional<c10::ArrayRef<double>> scale_factors);
std::optional<c10::ArrayRef<double>> scale_factors);
} // namespace upsample
namespace upsample_cuda {
// TODO: Remove duplication with Upsample.h (CPU).
inline c10::optional<double> get_scale_value(c10::optional<c10::ArrayRef<double>> scales, int idx) {
inline std::optional<double> get_scale_value(std::optional<c10::ArrayRef<double>> scales, int idx) {
if (!scales) {
return nullopt;
}
@ -73,7 +73,7 @@ __device__ inline scalar_t max(scalar_t a, scalar_t b) {
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename accscalar_t>
__host__ __forceinline__ static accscalar_t compute_scales_value(
const c10::optional<double> scale,
const std::optional<double> scale,
int64_t src_size,
int64_t dst_size) {
// FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults.
@ -84,7 +84,7 @@ __host__ __forceinline__ static accscalar_t compute_scales_value(
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename accscalar_t>
__host__ __forceinline__ static accscalar_t compute_scales_value_backwards(
const c10::optional<double> scale,
const std::optional<double> scale,
int64_t src_size,
int64_t dst_size) {
// FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults.
@ -97,7 +97,7 @@ __host__ __forceinline__ static accscalar_t area_pixel_compute_scale(
int input_size,
int output_size,
bool align_corners,
const c10::optional<double> scale) {
const std::optional<double> scale) {
if(align_corners) {
if(output_size > 1) {
return (accscalar_t)(input_size - 1) / (output_size - 1);
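
A brief aside on the arithmetic visible in the hunk above (an illustrative sketch only, not part of the change): in the align_corners branch the scale is (input_size - 1) / (output_size - 1), so the first and last output pixels map exactly onto the first and last input pixels.

```
#include <iostream>

int main() {
  // align_corners branch from the hunk above: corners of input and output
  // are pinned to each other, so the coordinate scale is (in - 1) / (out - 1).
  int input_size = 4;
  int output_size = 7;
  double scale = static_cast<double>(input_size - 1) / (output_size - 1);
  std::cout << scale << '\n';  // prints 0.5; output index i maps to input coordinate i * 0.5
  return 0;
}
```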

View File

@ -17,8 +17,8 @@ void _fused_adam_amsgrad_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
void _fused_adam_amsgrad_cuda_impl_(
at::TensorList params,
@ -33,8 +33,8 @@ void _fused_adam_amsgrad_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
} // namespace native
} // namespace at

View File

@ -16,8 +16,8 @@ void _fused_adam_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
void _fused_adam_cuda_impl_(
at::TensorList params,
@ -31,8 +31,8 @@ void _fused_adam_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
} // namespace native
} // namespace at

View File

@ -17,8 +17,8 @@ void _fused_adamw_amsgrad_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
void _fused_adamw_amsgrad_cuda_impl_(
at::TensorList params,
@ -33,8 +33,8 @@ void _fused_adamw_amsgrad_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
} // namespace native
} // namespace at

View File

@ -16,8 +16,8 @@ void _fused_adamw_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
void _fused_adamw_cuda_impl_(
at::TensorList params,
@ -31,8 +31,8 @@ void _fused_adamw_cuda_impl_(
const double weight_decay,
const double eps,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf);
const std::optional<at::Tensor>& grad_scale,
const std::optional<at::Tensor>& found_inf);
} // namespace native
} // namespace at

View File

@ -74,7 +74,7 @@ void layer_norm_cpu_out(
std::tuple<Tensor, Tensor, Tensor> layer_norm_cpu(
const Tensor& input,
IntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
IntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt /* optional */, const std::optional<Tensor>& bias_opt /* optional */,
double eps) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@ -186,7 +186,7 @@ std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cpu(
Tensor layer_norm_symint(
const Tensor& input,
c10::SymIntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
c10::SymIntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt /* optional */, const std::optional<Tensor>& bias_opt /* optional */,
double eps,
bool /* cudnn_enable, deprecated */) {
// See [Note: hacky wrapper removal for optional tensor]
@ -204,7 +204,7 @@ DEFINE_DISPATCH(LayerNormBackwardKernel);
// Ported from pytorch/xla repo
std::tuple<Tensor, Tensor, Tensor> math_native_layer_norm(
const Tensor& input,
IntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
IntArrayRef normalized_shape, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
double eps) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);

View File

@ -22,13 +22,13 @@ namespace at { namespace native {
// See Note [ATen preprocessor philosophy]
std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm(
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool training, double exponential_average_factor, double epsilon) {
AT_ERROR("miopen_batch_norm: ATen not compiled with MIOpen support");
}
std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm_backward(
const Tensor& input, const Tensor& grad_output, const Tensor& weight, const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
const Tensor& input, const Tensor& grad_output, const Tensor& weight, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_var_opt,
double epsilon) {
AT_ERROR("miopen_batch_norm_backward: ATen not compiled with MIOpen support");
}
@ -58,7 +58,7 @@ Tensor expandScale(const Tensor& t, int64_t dim) {
} // namespace
std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm(
const Tensor& input_t, const Tensor& weight_t, const std::optional<Tensor>& bias_t_opt, const c10::optional<Tensor>& running_mean_t_opt, const c10::optional<Tensor>& running_var_t_opt,
const Tensor& input_t, const Tensor& weight_t, const std::optional<Tensor>& bias_t_opt, const std::optional<Tensor>& running_mean_t_opt, const std::optional<Tensor>& running_var_t_opt,
bool training, double exponential_average_factor, double epsilon)
{
// See [Note: hacky wrapper removal for optional tensor]

View File

@ -122,7 +122,7 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_depthwise_convolution_backwa
at::Tensor miopen_convolution_add_relu(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& z,
const std::optional<Scalar>& alpha, const c10::optional<Tensor>& bias, IntArrayRef stride,
const std::optional<Scalar>& alpha, const std::optional<Tensor>& bias, IntArrayRef stride,
IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
AT_ERROR("miopen_convolution_add_relu: ATen not compiled with MIOpen support");
}

View File

@ -39,7 +39,7 @@ namespace at { namespace native {
std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> miopen_rnn_backward(
const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const std::optional<Tensor>& cx_opt,
const Tensor& output, const std::optional<Tensor>& grad_output_r_opt, const c10::optional<Tensor>& grad_hy_r_opt, const c10::optional<Tensor>& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
const Tensor& output, const std::optional<Tensor>& grad_output_r_opt, const std::optional<Tensor>& grad_hy_r_opt, const std::optional<Tensor>& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const std::optional<Tensor>& dropout_state_opt,
const Tensor& reserve, std::array<bool, 4> output_mask
) {
@ -759,7 +759,7 @@ std::vector<Tensor> miopen_rnn_backward_weight(
std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> miopen_rnn_backward(
const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const std::optional<Tensor>& cx_opt,
const Tensor& output, const std::optional<Tensor>& grad_output_r_opt, const c10::optional<Tensor>& grad_hy_r_opt, const c10::optional<Tensor>& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
const Tensor& output, const std::optional<Tensor>& grad_output_r_opt, const std::optional<Tensor>& grad_hy_r_opt, const std::optional<Tensor>& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const std::optional<Tensor>& dropout_state_opt,
const Tensor& reserve, std::array<bool, 4> output_mask
) {

View File

@ -61,7 +61,7 @@ ideep::tensor::data_type get_mkldnn_dtype(ScalarType type) {
}
}
Tensor new_with_itensor_mkldnn(ideep::tensor&& it, std::optional<ScalarType> dtype, c10::optional<Device> device) {
Tensor new_with_itensor_mkldnn(ideep::tensor&& it, std::optional<ScalarType> dtype, std::optional<Device> device) {
// NOTE: int32_t dims from ideep::tensor but sizes needs int64_t
// TODO: support int64_t dims in ideep::tensor to avoid extra conversion
auto dims = it.get_dims();

View File

@ -29,7 +29,7 @@ static inline ideep::tensor::data_type get_mkldnn_dtype(const Tensor& t) {
}
// Construct aten MKL-DNN tensor given an ideep tensor
TORCH_API Tensor new_with_itensor_mkldnn(ideep::tensor&& it, std::optional<ScalarType> dtype, c10::optional<Device> device);
TORCH_API Tensor new_with_itensor_mkldnn(ideep::tensor&& it, std::optional<ScalarType> dtype, std::optional<Device> device);
// Retrieve `ideep::tensor` from MKL-DNN tensor
TORCH_API ideep::tensor& itensor_from_mkldnn(const Tensor& mkldnn_tensor);

View File

@ -24,7 +24,7 @@ namespace at { namespace native {
#if AT_MKLDNN_ENABLED()
Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, std::optional<ScalarType> dtype, c10::optional<bool> masked_grad) {
Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, std::optional<ScalarType> dtype, std::optional<bool> masked_grad) {
TORCH_CHECK(mkldnn_tensor.scalar_type() == ScalarType::Float ||
mkldnn_tensor.scalar_type() == ScalarType::BFloat16 ||
mkldnn_tensor.scalar_type() == ScalarType::Half ||
@ -525,7 +525,7 @@ TORCH_LIBRARY_IMPL(mkldnn, CPU, m) {
#else
Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, std::optional<ScalarType> dtype, c10::optional<bool> masked_grad) {
Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, std::optional<ScalarType> dtype, std::optional<bool> masked_grad) {
TORCH_CHECK(false, "MKL-DNN build is disabled");
}

View File

@ -21,7 +21,7 @@ namespace at {
namespace native {
std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool train,
double momentum,
double eps) {
@ -30,7 +30,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm_backward(
const Tensor& grad_output,
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_invstd_opt,
bool train,
double eps,
std::array<bool,3> grad_input_mask) {
@ -45,7 +45,7 @@ static std::tuple<Tensor, Tensor, Tensor> mkldnn_layer_norm_last_index_weight_bi
}
std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var,
bool train,
double momentum,
double eps) {
@ -54,7 +54,7 @@ std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit(
std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit_no_stats(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
bool train,
double momentum,
double eps) {
@ -62,15 +62,15 @@ std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit_no_stats(
}
std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_mkldnn(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, double momentum, double eps) {
TORCH_CHECK(false, "_batch_norm_with_update_mkldnn: ATen not compiled with MKLDNN support");
}
std::tuple<Tensor, Tensor, Tensor> _new_batch_norm_backward_mkldnn(
const Tensor& grad_output, const Tensor& input, const Tensor& weight,
const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_var_opt,
bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {
TORCH_CHECK(false, "_new_batch_norm_backward_mkldnn: ATen not compiled with MKLDNN support");
}
@ -131,7 +131,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_layer_norm_last_index_weight_bias_f32(
std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool train,
double momentum,
double eps) {
@ -209,7 +209,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_mkldnn(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
Tensor& running_mean, Tensor& running_var, double momentum, double eps) {
Tensor output, save_mean, save_var;
std::tie(output, save_mean, save_var) =
@ -220,7 +220,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _batch_norm_with_update_mkldnn(
std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, Tensor& running_mean, Tensor& running_var,
bool train,
double momentum,
double eps) {
@ -229,7 +229,7 @@ std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit(
std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit_no_stats(
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt,
bool train,
double momentum,
double eps) {
@ -239,15 +239,15 @@ std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit_no_stats(
std::tuple<Tensor, Tensor, Tensor> _new_batch_norm_backward_mkldnn(
const Tensor& grad_output, const Tensor& input, const Tensor& weight,
const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_var_opt,
bool update, double eps, std::array<bool,3> grad_input_mask, const Tensor& reserve) {
return mkldnn_batch_norm_backward(grad_output, input, weight, running_mean_opt, running_var_opt, save_mean_opt, save_var_opt, update, eps, grad_input_mask);
}
std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm_backward(const Tensor& grad_output,
const Tensor& input, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt,
const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_invstd_opt,
bool train,
double eps,
std::array<bool,3> grad_input_mask) {

View File

@ -12,7 +12,7 @@ namespace at { namespace native {
#if AT_MKLDNN_ENABLED()
Tensor empty_mkldnn(IntArrayRef sizes, std::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<c10::MemoryFormat> optional_memory_format) {
Tensor empty_mkldnn(IntArrayRef sizes, std::optional<ScalarType> dtype, std::optional<Layout> layout, std::optional<Device> device, std::optional<bool> pin_memory, std::optional<c10::MemoryFormat> optional_memory_format) {
TORCH_CHECK(
!optional_memory_format.has_value(),
"'memory_format' argument is incompatible with mkldnn tensor");
@ -26,7 +26,7 @@ Tensor empty_mkldnn(IntArrayRef sizes, std::optional<ScalarType> dtype, c10::opt
#else
Tensor empty_mkldnn(IntArrayRef sizes, std::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<c10::MemoryFormat> optional_memory_format) {
Tensor empty_mkldnn(IntArrayRef sizes, std::optional<ScalarType> dtype, std::optional<Layout> layout, std::optional<Device> device, std::optional<bool> pin_memory, std::optional<c10::MemoryFormat> optional_memory_format) {
TORCH_CHECK(false, "empty_mkldnn: MKL-DNN build is disabled");
}

View File

@ -211,7 +211,7 @@ Tensor NestedTensor_batch_offsets_from_size_tensor(
}
Tensor NestedTensor_to_mask(const Tensor& nt, std::optional<int64_t> mask_dim, c10::optional<int64_t> mask_dim_length) {
Tensor NestedTensor_to_mask(const Tensor& nt, std::optional<int64_t> mask_dim, std::optional<int64_t> mask_dim_length) {
auto* nt_impl = get_nested_tensor_impl(nt);
TORCH_CHECK(nested_tensor_impl_is_contiguous(nt_impl), "to_mask only works on contiguous NestedTensors.");
TORCH_CHECK(

View File

@ -50,7 +50,7 @@ Tensor NestedTensor_from_padded_tensor_cpu(
const Tensor& padded,
const NestedTensorImpl& nt);
Tensor NestedTensor_to_mask(const Tensor& nt, std::optional<int64_t> mask_dim, c10::optional<int64_t> mask_dim_length);
Tensor NestedTensor_to_mask(const Tensor& nt, std::optional<int64_t> mask_dim, std::optional<int64_t> mask_dim_length);
template <typename T>
void remove_padding_kernelLauncher(

View File

@ -380,7 +380,7 @@ Tensor q_batch_norm_impl(
} // namespace
Tensor quantized_batch_norm(
const Tensor& qx, const std::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
const Tensor& qx, const std::optional<Tensor>& weight_opt /* optional */, const std::optional<Tensor>& bias_opt /* optional */,
const Tensor& mean /* optional */,
const Tensor& var /* optional */,
double eps,

View File

@ -494,7 +494,7 @@ static at::Tensor _quantized_convolution_onednn(
std::optional<c10::string_view> binary_attr=c10::nullopt,
std::optional<at::Scalar> binary_alpha=c10::nullopt,
std::optional<c10::string_view> unary_attr=c10::nullopt,
torch::List<std::optional<at::Scalar>> unary_scalars=torch::List<c10::optional<at::Scalar>>(),
torch::List<std::optional<at::Scalar>> unary_scalars=torch::List<std::optional<at::Scalar>>(),
std::optional<c10::string_view> unary_algorithm=c10::nullopt);
#endif // #if AT_MKLDNN_ENABLED()

View File

@ -258,7 +258,7 @@ Tensor& threshold_backward_sparse_out(
Tensor nan_to_num_sparse(
const Tensor &self, std::optional<double> nan,
std::optional<double> posinf, c10::optional<double> neginf) {
std::optional<double> posinf, std::optional<double> neginf) {
return coalesced_unary_ufunc(
self, [&](const Tensor &t) {
return at::nan_to_num(t, nan, posinf, neginf);
@ -266,7 +266,7 @@ Tensor nan_to_num_sparse(
}
Tensor& nan_to_num_sparse_out(
const Tensor &self, std::optional<double> nan,
std::optional<double> posinf, c10::optional<double> neginf,
std::optional<double> posinf, std::optional<double> neginf,
Tensor &out) {
return coalesced_unary_ufunc_out(
self, out, [&](const Tensor &t, Tensor &out) {
@ -275,7 +275,7 @@ Tensor& nan_to_num_sparse_out(
}
Tensor& nan_to_num_sparse_(
Tensor &self, std::optional<double> nan,
std::optional<double> posinf, c10::optional<double> neginf) {
std::optional<double> posinf, std::optional<double> neginf) {
TORCH_CHECK(self.is_coalesced(), "nan_to_num_ requires coalesced input");
return nan_to_num_sparse_out(self, nan, posinf, neginf, self);
}

View File

@ -423,7 +423,7 @@ std::tuple<Tensor, Tensor> native_multi_head_attention_cpu(
}
int64_t _fused_sdp_choice_cpp(const Tensor& query_, const Tensor& key, const Tensor& value,
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, std::optional<double> scale){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal};
auto backend = sdp::select_sdp_backend_cpp(kernel_params);
if (backend == sdp::SDPBackend::error) {
@ -512,7 +512,7 @@ inline void validate_sdpa_input(
// the math and memory efficient attn_mask implementation
// Args:
// attn_mask: attn_mask of shape (B, L, S) or (L, S) or (B, N_heads, L, S)
std::optional<Tensor> convert_boolean_attn_mask(const c10::optional<Tensor>& attn_mask, caffe2::TypeMeta dtype) {
std::optional<Tensor> convert_boolean_attn_mask(const std::optional<Tensor>& attn_mask, caffe2::TypeMeta dtype) {
// Pass through
if(!attn_mask.has_value()){
return c10::nullopt;
@ -598,7 +598,7 @@ at::Tensor post_process_flash_output(
}
int64_t handle_private_use(const Tensor& query_, const Tensor& key, const Tensor& value,
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, std::optional<double> scale){
int64_t choice_int = static_cast<int64_t>(sdp::SDPBackend::math);
try {
choice_int = _fused_sdp_choice_stub(query_.device().type(),
@ -720,7 +720,7 @@ Tensor scaled_dot_product_attention(
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_math(
const Tensor& query_, const Tensor& key, const Tensor& value,
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal,
const std::optional<Tensor>& dropout_mask, c10::optional<double> scale) {
const std::optional<Tensor>& dropout_mask, std::optional<double> scale) {
C10_LOG_API_USAGE_ONCE("torch.sdpa.math_fallback");
if (query_.is_nested() || key.is_nested() || value.is_nested()) {
TORCH_CHECK(

View File

@ -9,7 +9,7 @@ namespace at {
namespace native {
using fused_sdp_choice_fn = int64_t (*)(const Tensor& query_, const Tensor& key, const Tensor& value,
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale);
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, std::optional<double> scale);
DECLARE_DISPATCH(fused_sdp_choice_fn, _fused_sdp_choice_stub);

View File

@ -817,7 +817,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_efficient_attenti
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale){
const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, std::optional<double> scale){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {

View File

@ -60,7 +60,7 @@ inline Tensor to_meta(const Tensor& t) {
/*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);
}
inline std::optional<Tensor> to_meta(const c10::optional<Tensor>& t) {
inline std::optional<Tensor> to_meta(const std::optional<Tensor>& t) {
if (t.has_value()) {
return c10::make_optional<Tensor>(to_meta(*t));
}

View File

@ -398,7 +398,7 @@ class TORCH_API Tensor: public TensorBase {
/// // f requires grad, has no operation creating it
/// @endcode
/// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
/// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, std::optional<TensorList> inputs=c10::nullopt) const;
///
/// Computes the gradient of current tensor with respect to graph leaves.
///
@ -433,7 +433,7 @@ class TORCH_API Tensor: public TensorBase {
/// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
/// It is an implementation detail on which the user should not rely.
/// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const {
void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, std::optional<TensorList> inputs=c10::nullopt) const {
// NB: Adding this wrapper to _backward here because we'd like our
// 'backwards' api to accept the 'inputs' argument optionally. Since code gen
// currently does not support optional of TensorList our approach is to replace
@ -626,7 +626,7 @@ class TORCH_API Tensor: public TensorBase {
return TensorBase::data();
}
void _backward(TensorList inputs, const std::optional<Tensor>& gradient, c10::optional<bool> keep_graph, bool create_graph) const;
void _backward(TensorList inputs, const std::optional<Tensor>& gradient, std::optional<bool> keep_graph, bool create_graph) const;
const Tensor& requires_grad_(bool _requires_grad=true) const {
TensorBase::requires_grad_(_requires_grad);

View File

@ -15,8 +15,8 @@ using namespace at;
static int test_int;
Tensor empty_override(SymIntArrayRef size, std::optional<ScalarType> dtype, c10::optional<Layout> layout,
std::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> optional_memory_format) {
Tensor empty_override(SymIntArrayRef size, std::optional<ScalarType> dtype, std::optional<Layout> layout,
std::optional<Device> device, std::optional<bool> pin_memory, std::optional<MemoryFormat> optional_memory_format) {
test_int = 1;
auto tensor_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(
Storage(

View File

@ -142,7 +142,7 @@ TEST(OptionalTest, Nullopt) {
// Ensure comparisons work...
using CmpTestTypes = testing::Types<
// between two optionals
std::pair<std::optional<int>, c10::optional<int>>,
std::pair<std::optional<int>, std::optional<int>>,
// between an optional and a value
std::pair<std::optional<int>, int>,

View File

@ -511,7 +511,7 @@ TEST(ExternalCall, Prepacked_Linear_float) {
const std::optional<at::Scalar>&,
const std::optional<at::Scalar>&)>();
auto prepacked = linear_clamp_prepack_op.call(
weight, bias, std::optional<at::Scalar>(), c10::optional<at::Scalar>());
weight, bias, std::optional<at::Scalar>(), std::optional<at::Scalar>());
BufHandle DummyPrepacked("DummyPrepacked", {1}, kFloat);
Tensor Result = Tensor(

View File

@ -20,8 +20,8 @@ Tensor get_tensor(caffe2::TypeMeta dtype, IntArrayRef size) {
return Tensor(std::move(tensor_impl));
}
Tensor empty_override(IntArrayRef size, std::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device,
std::optional<bool> pin_memory, c10::optional<c10::MemoryFormat> optional_memory_format) {
Tensor empty_override(IntArrayRef size, std::optional<ScalarType> dtype, std::optional<Layout> layout, std::optional<Device> device,
std::optional<bool> pin_memory, std::optional<c10::MemoryFormat> optional_memory_format) {
test_int = 0;
return get_tensor(scalarTypeToTypeMeta(dtype_or_default(dtype)), size);
}

View File

@ -134,7 +134,7 @@ at::Tensor custom_empty_memory_format(at::IntArrayRef size,
memory_format);
}
at::Tensor custom_empty_strided(c10::IntArrayRef size, c10::IntArrayRef stride, std::optional<at::ScalarType> dtype_opt, c10::optional<at::Layout> layout_opt, c10::optional<at::Device> device_opt, c10::optional<bool> pin_memory_opt) {
at::Tensor custom_empty_strided(c10::IntArrayRef size, c10::IntArrayRef stride, std::optional<at::ScalarType> dtype_opt, std::optional<at::Layout> layout_opt, std::optional<at::Device> device_opt, std::optional<bool> pin_memory_opt) {
op_counter += 1;
constexpr c10::DispatchKeySet private_use_ks(c10::DispatchKey::PrivateUse1);

View File

@ -104,7 +104,7 @@ std::ostream& operator<<(
}
/// A utility class that accepts either a container of `D`-many
/// `std::optional<T>` values, or a single `c10::optional<T>` value, which is
/// `std::optional<T>` values, or a single `std::optional<T>` value, which is
/// internally repeated `D` times. It has the additional ability to accept
/// containers of the underlying type `T` and convert them to a container of
/// `std::optional<T>`.
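
The doc comment above describes a helper that either takes D optional values or broadcasts a single optional value to D slots. The class itself is not shown in this hunk, so the following is a hedged, hypothetical stand-in (the name OptionalExpandingArray is invented for illustration), not the class being edited:

```
#include <algorithm>
#include <array>
#include <cstddef>
#include <initializer_list>
#include <optional>
#include <stdexcept>

// Hypothetical stand-in for the utility described above: holds D optional
// values, constructible either from a single value (repeated D times) or
// from exactly D values.
template <std::size_t D, typename T>
class OptionalExpandingArray {
 public:
  /*implicit*/ OptionalExpandingArray(std::optional<T> single) {
    values_.fill(single);  // repeat the single value D times
  }
  /*implicit*/ OptionalExpandingArray(std::initializer_list<std::optional<T>> list) {
    if (list.size() != D) {
      throw std::invalid_argument("expected exactly D values");
    }
    std::copy(list.begin(), list.end(), values_.begin());
  }
  const std::optional<T>& operator[](std::size_t i) const { return values_[i]; }

 private:
  std::array<std::optional<T>, D> values_{};
};

// Both forms describe the same 2-element array of optional doubles:
//   OptionalExpandingArray<2, double> a(std::nullopt);      // {nullopt, nullopt}
//   OptionalExpandingArray<2, double> b{1.5, std::nullopt};  // {1.5, nullopt}
```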

View File

@ -19,7 +19,7 @@ Scatter::Scatter(
std::vector<at::Device> devices,
std::optional<std::vector<int64_t>> chunk_sizes,
int64_t dim,
std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams,
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams,
bool unsqueeze_scalars)
: devices_(std::move(devices)),
chunk_sizes_(std::move(chunk_sizes)),

View File

@ -19,7 +19,7 @@ struct TORCH_CUDA_CU_API Scatter : public Node {
std::vector<at::Device> devices,
std::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
int64_t dim = 0,
std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams =
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams =
c10::nullopt,
bool unsqueeze_scalars = false);
~Scatter() override;
@ -29,7 +29,7 @@ struct TORCH_CUDA_CU_API Scatter : public Node {
std::vector<at::Device> devices_;
std::optional<std::vector<int64_t>> chunk_sizes_;
int64_t dim_;
std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams_;
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams_;
bool unsqueeze_scalars_;
};
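
The `streams_` member is doubly optional: the outer `std::optional` means no streams were supplied at all, and each inner `std::optional` means no stream for that particular device. A standalone sketch of that shape, with a placeholder `Stream` type standing in for `at::cuda::CUDAStream`:

```
#include <optional>
#include <vector>

struct Stream { int device_index; };  // placeholder for at::cuda::CUDAStream

int main() {
  // Nothing supplied at all (the default, formerly c10::nullopt).
  std::optional<std::vector<std::optional<Stream>>> streams = std::nullopt;

  // Streams supplied for devices 0 and 2, but none for device 1.
  streams = std::vector<std::optional<Stream>>{Stream{0}, std::nullopt, Stream{2}};

  bool ok = streams.has_value() && !(*streams)[1].has_value() &&
            (*streams)[2]->device_index == 2;
  return ok ? 0 : 1;
}
```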

View File

@ -232,7 +232,7 @@ std::vector<at::Tensor>& scatter_out(
const at::Tensor& tensor,
std::vector<at::Tensor>& out_tensors,
int64_t dim,
const std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams) {
TORCH_CHECK(
!out_tensors.empty(),
@ -315,7 +315,7 @@ std::vector<at::Tensor> scatter(
at::IntArrayRef devices,
const std::optional<std::vector<int64_t>>& chunk_sizes,
int64_t dim,
const std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams) {
TORCH_CHECK(!devices.empty(), "Expected at least one device to scatter to");
if (chunk_sizes.has_value()) {

View File

@ -28,7 +28,7 @@ TORCH_CUDA_CU_API std::vector<at::Tensor>& scatter_out(
const at::Tensor& tensor,
std::vector<at::Tensor>& out_tensors,
int64_t dim = 0,
const std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams = c10::nullopt);
TORCH_CUDA_CU_API std::vector<at::Tensor> scatter(
@ -36,7 +36,7 @@ TORCH_CUDA_CU_API std::vector<at::Tensor> scatter(
at::IntArrayRef devices,
const std::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
int64_t dim = 0,
const std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>&
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams = c10::nullopt);
TORCH_CUDA_CU_API at::Tensor& gather_out(

View File

@ -49,7 +49,7 @@ void initCommMethods(PyObject* module) {
std::optional<std::vector<int64_t>> chunk_sizes,
int64_t dim,
std::optional<py::object> py_streams) {
std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>
streams;
if (py_streams) {
py::handle handle = *py_streams;
@ -70,7 +70,7 @@ void initCommMethods(PyObject* module) {
std::vector<at::Tensor>& out_tensors,
int64_t dim,
std::optional<py::object> py_streams) {
std::optional<std::vector<c10::optional<at::cuda::CUDAStream>>>
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>
streams;
if (py_streams) {
py::handle handle = *py_streams;

View File

@ -13,7 +13,7 @@ namespace c10d {
// callback function will be given arguments (optional<string> oldValue,
// optional<string> newValue)
using WatchKeyCallback =
std::function<void(std::optional<std::string>, c10::optional<std::string>)>;
std::function<void(std::optional<std::string>, std::optional<std::string>)>;
class TORCH_API Store : public torch::CustomClassHolder {
public:

View File

@ -134,7 +134,7 @@ std::vector<size_t> get_tensor_parameter_index(
}
} else if (
*arguments[idx].real_type() ==
*c10::getTypePtr<c10::optional<at::Tensor>>()) {
*c10::getTypePtr<std::optional<at::Tensor>>()) {
// optional tensor
if (stack[idx].toOptional<at::Tensor>().has_value()) {
tensor_parameter_index.push_back(idx);

View File

@ -44,7 +44,7 @@ void ConstantValueMap::SetAllGraphInputsStatic(bool all_static) {
c10::make_optional(all_static);
}
c10::optional<bool> ConstantValueMap::GetAllGraphInputsStatic() {
std::optional<bool> ConstantValueMap::GetAllGraphInputsStatic() {
return ConstantValueMap::getInstance().allGraphInputsStatic;
}

View File

@ -27,7 +27,7 @@ class ConstantValueMap {
static std::optional<size_t> GetRank(const std::string& tensorName);
static void SetAllGraphInputsStatic(bool all_static);
static c10::optional<bool> GetAllGraphInputsStatic();
static std::optional<bool> GetAllGraphInputsStatic();
static void SetAllGraphInputsReliableComputed(bool computed);
static bool GetAllGraphInputsReliableComputed();
@ -110,7 +110,7 @@ class ConstantValueMap {
SymbolDimMap symbolDimMap;
DimSymbolMap dimSymbolMap;
// Stores if all graph-level inputs have static shape
c10::optional<bool> allGraphInputsStatic;
std::optional<bool> allGraphInputsStatic;
// True if reliable has been computed for all graph inputs
bool allGraphInputsReliableComputed;
};
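
`allGraphInputsStatic` follows the common tri-state pattern: `nullopt` until the property has been computed, then an ordinary `bool`. A minimal sketch of the same idea outside of `ConstantValueMap`:

```
#include <iostream>
#include <optional>

class StaticInputsFlag {  // illustrative, not the real ConstantValueMap
 public:
  void set(bool all_static) { all_graph_inputs_static_ = all_static; }
  std::optional<bool> get() const { return all_graph_inputs_static_; }

 private:
  std::optional<bool> all_graph_inputs_static_;  // nullopt until computed
};

int main() {
  StaticInputsFlag flag;
  std::cout << std::boolalpha;
  std::cout << "computed: " << flag.get().has_value() << '\n';   // false
  flag.set(true);
  std::cout << "computed: " << flag.get().has_value()
            << ", value: " << *flag.get() << '\n';               // true, true
  return 0;
}
```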

View File

@ -19,7 +19,7 @@ namespace jit {
// Conservatively compare two optionals. If both are undefined, assume
// they aren't equal
template <typename T>
static bool mustBeEqual(const std::optional<T>& a, const c10::optional<T>& b) {
static bool mustBeEqual(const std::optional<T>& a, const std::optional<T>& b) {
return a == b && a.has_value();
}
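
The helper above is self-contained, so its behaviour can be checked directly; as the comment says, two disengaged optionals are conservatively treated as not equal:

```
#include <cassert>
#include <optional>

template <typename T>
static bool mustBeEqual(const std::optional<T>& a, const std::optional<T>& b) {
  return a == b && a.has_value();
}

int main() {
  assert(mustBeEqual(std::optional<int>(3), std::optional<int>(3)));   // both hold 3
  assert(!mustBeEqual(std::optional<int>(3), std::optional<int>(4)));  // values differ
  assert(!mustBeEqual(std::optional<int>(), std::optional<int>()));    // both undefined
  return 0;
}
```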

View File

@ -65,7 +65,7 @@ namespace jit {
struct ShapeArg
: public std::
pair<std::optional<c10::ShapeSymbol>, c10::optional<int64_t>> {
pair<std::optional<c10::ShapeSymbol>, std::optional<int64_t>> {
using pair::pair;
static ShapeArg unknownInteger() {

View File

@ -865,7 +865,7 @@ void initPythonIRBindings(PyObject* module_) {
})
.def(
"with_sizes",
[](Type& t, std::optional<std::vector<c10::optional<int64_t>>> sizes)
[](Type& t, std::optional<std::vector<std::optional<int64_t>>> sizes)
-> py::object {
auto ptt = t.expect<TensorType>();
if (!ptt) {

View File

@ -26,6 +26,6 @@ struct UnwindError : public std::runtime_error {
// #define PRINT_LINE_TABLE(...) LOG_INFO(__VA_ARGS__)
#define PRINT_LINE_TABLE(...)
using c10::optional; // NOLINT
using std::optional; // NOLINT
} // namespace torch::unwind
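
For context on why a purely mechanical replacement is safe here: the premise of the codemod is that `c10::optional<T>` is already an alias of `std::optional<T>` in this tree, so the two spellings name the same type. A sketch of that check; the header path is my assumption:

```
#include <c10/util/Optional.h>  // assumed location of the c10::optional alias
#include <optional>
#include <type_traits>

static_assert(std::is_same_v<c10::optional<int>, std::optional<int>>,
              "the codemod changes only the spelling, not the type");

int main() { return 0; }
```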