update aten op overload to not use `from` to avoid compile errors (#89797)

Fix for https://github.com/pytorch/pytorch/issues/93591 by changing `random_.from` to `random_.from_int`.

The previous overload name would break when an fx graph containing the op was printed and re-parsed, because `from` is a reserved Python keyword and is not valid as an attribute name in generated source. The rename changes the serialized schema, so I have added a serialization adapter (operator upgrader) to keep previously exported models loading.
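
As a minimal, standalone sketch of the failure mode (it uses only the Python standard library; the operator strings simply mirror the old and new schema names from this PR):

```python
import keyword

# `from` is a reserved keyword; `from_int` is not.
assert keyword.iskeyword("from")
assert not keyword.iskeyword("from_int")

# FX prints graphs as plain Python source. A call rendered with the old
# overload name cannot be parsed back, because `.from` is a syntax error:
old_call = "torch.ops.aten.random_.from(x, 0, 10)"
try:
    compile(old_call, "<fx-generated>", "eval")
except SyntaxError as exc:
    print(f"old overload does not compile: {exc}")

# The renamed overload produces valid generated source:
new_call = "torch.ops.aten.random_.from_int(x, 0, 10)"
compile(new_call, "<fx-generated>", "eval")
```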

Pull Request resolved: https://github.com/pytorch/pytorch/pull/89797
Approved by: https://github.com/tugsbayasgalan
Authored by Elias Ellison on 2023-02-08 21:47:37 +00:00; committed by PyTorch MergeBot
parent f2156ef42b
commit 021d267694
19 changed files with 247 additions and 111 deletions

View File

@@ -1 +1 @@
9cbcdb4008c14ad8251c5d4d7723aa616f659edb
a121c7d3353f1c313ddc0fc97cc41162a3dd28e4

View File

@@ -67,7 +67,7 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
m.impl("poisson", unsupportedRandomOp<const Tensor&, optional<Generator>>);
m.impl("random_.from", unsupportedRandomOp_<Tensor&, int64_t, optional<int64_t>, optional<Generator>>);
m.impl("random_.from_int", unsupportedRandomOp_<Tensor&, int64_t, optional<int64_t>, optional<Generator>>);
m.impl("random_.to", unsupportedRandomOp_<Tensor&, int64_t, optional<Generator>>);
m.impl("random_", unsupportedRandomOp_<Tensor&, optional<Generator>>);

View File

@@ -384,7 +384,7 @@ TORCH_LIBRARY_IMPL(aten, Named, m) {
m.impl("rand_like", CppFunction::makeFallthrough());
m.impl("randn_like", CppFunction::makeFallthrough());
m.impl("random_", CppFunction::makeFallthrough());
m.impl("random_.from", CppFunction::makeFallthrough());
m.impl("random_.from_int", CppFunction::makeFallthrough());
m.impl("random_.to", CppFunction::makeFallthrough());
m.impl("real", CppFunction::makeFallthrough());
m.impl("reciprocal", CppFunction::makeFallthrough());

View File

@@ -451,7 +451,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchVmapMode, m) {
RANDOM_BATCH_RULE2(rand, names);
RANDOM_INPLACE_BATCH_RULE(random_);
RANDOM_INPLACE_BATCH_RULE2(random_, from);
RANDOM_INPLACE_BATCH_RULE2(random_, from_int);
RANDOM_INPLACE_BATCH_RULE2(random_, to);
RANDOM_INPLACE_BATCH_RULE(cauchy_);

View File

@@ -261,7 +261,7 @@ Tensor& bernoulli_mps_(Tensor& self, const Tensor& p_, c10::optional<Generator>
return mps::bernoulli_mps_impl(self, p_, gen, __func__);
}
// random_.from
// random_.from_int
Tensor& random_mps_(Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> gen) {
auto input_dtype = self.scalar_type();
int64_t to = 0;

View File

@@ -8024,7 +8024,7 @@
CPU, CUDA: addbmm
MPS: addbmm_mps
- func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
- func: random_.from_int(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator
variants: method
tags: nondeterministic_seeded
@@ -8032,7 +8032,7 @@
CPU, CUDA: random_
Meta: random_meta_
MPS: random_mps_
autogen: random.from, random.from_out
autogen: random.from_int, random.from_int_out
- func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
device_check: NoCheck # TensorIterator

View File

@@ -102,7 +102,7 @@ full_codegen:
- pow.Tensor_Scalar
- pow.Tensor_Tensor
- random
- random.from
- random.from_int
- random.to
- reciprocal
- relu

View File

@@ -131,7 +131,7 @@ Tensor& bernoulli_out(const Tensor& self, c10::optional<Generator> gen, Tensor&
TORCH_LIBRARY_IMPL(aten, CustomRNGKeyId, m) {
// Random
m.impl("random_.from", random_from_to);
m.impl("random_.from_int", random_from_to);
m.impl("random_.to", random_to);
m.impl("random_", random_);
// Normal

View File

@@ -6,7 +6,7 @@ namespace serialize {
constexpr uint64_t kMinSupportedFileFormatVersion = 0x1L;
constexpr uint64_t kMaxSupportedFileFormatVersion = 0xAL;
constexpr uint64_t kMaxSupportedFileFormatVersion = 0xBL;
// Versions (i.e. why was the version number bumped?)

View File

@@ -56,7 +56,7 @@ size_t getInstanceCount() {
}
TORCH_LIBRARY_IMPL(aten, CustomRNGKeyId, m) {
m.impl("aten::random_.from", random_from_to);
m.impl("aten::random_.from_int", random_from_to);
m.impl("aten::random_.to", random_to);
m.impl("aten::random_", random_);
}

View File

@@ -1060,13 +1060,13 @@ aten::randn.names_out
aten::randn_like
aten::randn_like.out
aten::random
aten::random.from
aten::random.from_out
aten::random.from_int
aten::random.from_int_out
aten::random.out
aten::random.to
aten::random.to_out
aten::random_
aten::random_.from
aten::random_.from_int
aten::random_.to
aten::randperm
aten::randperm.generator

View File

@@ -331,6 +331,8 @@ ALLOW_LIST = [
("prim::CudaFusionGroup", datetime.date(2023, 2, 1)),
("prim::CudaFusionViewGuard", datetime.date(2023, 2, 1)),
("prim::CudaFusionSizeEq", datetime.date(2023, 2, 1)),
("aten::random.from_out", datetime.date(2023, 3, 3)),
("aten::random_.from", datetime.date(2023, 3, 3)),
("prim::transpose_copy.int", datetime.date(2023, 2, 1)),
("prim::expand_as_copy", datetime.date(2023, 2, 1)),
("prim::squeeze_copy", datetime.date(2023, 2, 1)),

View File

@@ -540,3 +540,34 @@ class TestSaveLoadForOpVersion(JitTestCase):
self.assertTrue(output.size(dim=0) == 100)
# "Upgraded" model should match the new version output
self.assertEqual(output, output_current)
    def test_versioned_random_(self):
        class Module(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                out = torch.zeros_like(x)
                return out.random_(0, 10)

        paths = [
            "/jit/fixtures/test_versioned_random_v10.ptl",
            "/jit/fixtures/test_versioned_random_func_v10.ptl",
            "/jit/fixtures/test_versioned_random_out_v10.ptl"
        ]
        for path in paths:
            model_path = pytorch_test_dir + path
            loaded_model = torch.jit.load(model_path)
            buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
            buffer.seek(0)
            v10_mobile_module = _load_for_lite_interpreter(buffer)
            current_mobile_module = self._save_load_mobile_module(Module)
            inp = torch.rand([20, 20])
            with torch.testing._internal.common_utils.freeze_rng_state():
                output = v10_mobile_module(inp)
            with torch.testing._internal.common_utils.freeze_rng_state():
                output_current = current_mobile_module(inp)
            # "Upgraded" model should match the new version output
            self.assertEqual(output, output_current)

View File

@@ -744,6 +744,12 @@ class FakeTensorOperatorInvariants(TestCase):
has_kwarg_device or op == torch.ops.aten._list_to_tensor.default
)
    def test_no_reserved_keywords(self):
        for schema in self.get_all_aten_schemas():
            op = self.get_aten_op(schema)
            # will fail if a reserved keyword is used as an operator name or overload
            eval(str(op), {"aten": torch.ops.aten})
@unittest.expectedFailure
def test_sparse_new(self):
with FakeTensorMode():

View File

@@ -5741,7 +5741,7 @@ class TestNLLLoss(TestCase):
mps_x = torch.randn(5, device='mps', generator=g_mps)
self.assertEqual(mps_x, mps_y)
# Test random_.to and random_.from
# Test random_.to and random_.from_int
def test_random(self):
def helper(shape, low, high, dtype=torch.int32):

View File

@@ -1304,7 +1304,7 @@
self: rad2deg_backward(grad)
result: auto_element_wise
- name: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
- name: random_.from_int(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
self: zeros_like(grad)
result: self_t.zero_()

View File

@@ -87,6 +87,18 @@ getOperatorVersionMapForMobile() {
std::vector<Upgrader>({
Upgrader({0, 8, "logspace_out_0_8", 16})
})},
{std::string("aten::random.from_int"),
std::vector<Upgrader>({
Upgrader({0, 10, "random_from_0_10", 18})
})},
{std::string("aten::random.from_int_out"),
std::vector<Upgrader>({
Upgrader({0, 10, "random_from_out_0_10", 19})
})},
{std::string("aten::random_.from_int"),
std::vector<Upgrader>({
Upgrader({0, 10, "random__from_0_10", 17})
})},
});
return operatorVersionMapForMobile;
}
@@ -666,6 +678,67 @@ const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
OperatorString({"prim::unchecked_cast", "", 1}),
}), // operators list
}),
ByteCodeFunctionWithOperator({
mobile::Function::registerFunc(
"random__from_0_10",
std::vector<Instruction>({
Instruction{OpCode::STOREN, 1, 4},
Instruction{OpCode::MOVE, 1, 0},
Instruction{OpCode::MOVE, 2, 0},
Instruction{OpCode::MOVE, 3, 0},
Instruction{OpCode::MOVE, 4, 0},
Instruction{OpCode::OP, 0, 0},
Instruction{OpCode::RET, 0, 0},
}), // instructions list,
std::vector<c10::IValue>(), // constants list,
std::vector<c10::TypePtr>(), // types list,
4
),
std::vector<OperatorString>({
OperatorString({"aten::random_", "from_int", 4}),
}), // operators list
}),
ByteCodeFunctionWithOperator({
mobile::Function::registerFunc(
"random_from_0_10",
std::vector<Instruction>({
Instruction{OpCode::STOREN, 1, 4},
Instruction{OpCode::MOVE, 1, 0},
Instruction{OpCode::MOVE, 2, 0},
Instruction{OpCode::MOVE, 3, 0},
Instruction{OpCode::MOVE, 4, 0},
Instruction{OpCode::OP, 0, 0},
Instruction{OpCode::RET, 0, 0},
}), // instructions list,
std::vector<c10::IValue>(), // constants list,
std::vector<c10::TypePtr>(), // types list,
4
),
std::vector<OperatorString>({
OperatorString({"aten::random", "from_int", 4}),
}), // operators list
}),
ByteCodeFunctionWithOperator({
mobile::Function::registerFunc(
"random_from_out_0_10",
std::vector<Instruction>({
Instruction{OpCode::STOREN, 1, 5},
Instruction{OpCode::MOVE, 1, 0},
Instruction{OpCode::MOVE, 2, 0},
Instruction{OpCode::MOVE, 3, 0},
Instruction{OpCode::MOVE, 4, 0},
Instruction{OpCode::MOVE, 5, 0},
Instruction{OpCode::OP, 0, 0},
Instruction{OpCode::RET, 0, 0},
}), // instructions list,
std::vector<c10::IValue>(), // constants list,
std::vector<c10::TypePtr>(), // types list,
5
),
std::vector<OperatorString>({
OperatorString({"aten::random", "from_int_out", 5}),
}), // operators list
}),
});
for (const auto& upgrader_function : upgrader_function_list) {
for (const auto& op : upgrader_function.operators) {

View File

@@ -14,90 +14,90 @@
namespace torch {
namespace jit {
static std::unordered_map<std::string, std::string> kUpgradersEntryMap({
{"logspace_0_8", R"SCRIPT(
static std::unordered_map<std::string, std::string> kUpgradersEntryMap(
{{"logspace_0_8", R"SCRIPT(
def logspace_0_8(start: Union[int, float, complex], end: Union[int, float, complex], steps: Optional[int], base: float, *, dtype: Optional[int], layout: Optional[int],
device: Optional[Device], pin_memory: Optional[bool]):
if (steps is None):
return torch.logspace(start=start, end=end, steps=100, base=base, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
return torch.logspace(start=start, end=end, steps=steps, base=base, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
)SCRIPT"},
{"logspace_out_0_8", R"SCRIPT(
{"logspace_out_0_8", R"SCRIPT(
def logspace_out_0_8(start: Union[int, float, complex], end: Union[int, float, complex], steps: Optional[int], base: float, *, out: Tensor):
if (steps is None):
return torch.logspace(start=start, end=end, steps=100, base=base, out=out)
return torch.logspace(start=start, end=end, steps=steps, base=base, out=out)
)SCRIPT"},
{"linspace_0_7", R"SCRIPT(
{"linspace_0_7", R"SCRIPT(
def linspace_0_7(start: Union[int, float, complex], end: Union[int, float, complex], steps: Optional[int], *, dtype: Optional[int], layout: Optional[int],
device: Optional[Device], pin_memory: Optional[bool]):
if (steps is None):
return torch.linspace(start=start, end=end, steps=100, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
return torch.linspace(start=start, end=end, steps=steps, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
)SCRIPT"},
{"linspace_out_0_7", R"SCRIPT(
{"linspace_out_0_7", R"SCRIPT(
def linspace_out_0_7(start: Union[int, float, complex], end: Union[int, float, complex], steps: Optional[int], *, out: Tensor):
if (steps is None):
return torch.linspace(start=start, end=end, steps=100, out=out)
return torch.linspace(start=start, end=end, steps=steps, out=out)
)SCRIPT"},
{"div_Tensor_0_3", R"SCRIPT(
{"div_Tensor_0_3", R"SCRIPT(
def div_Tensor_0_3(self: Tensor, other: Tensor) -> Tensor:
if (self.is_floating_point() or other.is_floating_point()):
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
)SCRIPT"},
{"div_Tensor_mode_0_3", R"SCRIPT(
{"div_Tensor_mode_0_3", R"SCRIPT(
def div_Tensor_mode_0_3(self: Tensor, other: Tensor, *, rounding_mode: Optional[str]=None) -> Tensor:
return self.divide(other, rounding_mode=rounding_mode)
)SCRIPT"},
{"div_Scalar_0_3", R"SCRIPT(
{"div_Scalar_0_3", R"SCRIPT(
def div_Scalar_0_3(self: Tensor, other: number) -> Tensor:
if (self.is_floating_point() or isinstance(other, float)):
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
)SCRIPT"},
{"div_Scalar_mode_0_3", R"SCRIPT(
{"div_Scalar_mode_0_3", R"SCRIPT(
def div_Scalar_mode_0_3(self: Tensor, other: number, *, rounding_mode: Optional[str]=None) -> Tensor:
return self.divide(other, rounding_mode=rounding_mode)
)SCRIPT"},
{"div_out_0_3", R"SCRIPT(
{"div_out_0_3", R"SCRIPT(
def div_out_0_3(self: Tensor, other: Tensor, *, out: Tensor) -> Tensor:
if (self.is_floating_point() or other.is_floating_point() or out.is_floating_point()):
return self.true_divide(other, out=out)
return self.divide(other, rounding_mode='trunc', out=out)
)SCRIPT"},
{"div_out_mode_0_3", R"SCRIPT(
{"div_out_mode_0_3", R"SCRIPT(
def div_out_mode_0_3(self: Tensor, other: Tensor, *, rounding_mode: Optional[str]=None, out: Tensor) -> Tensor:
return self.divide(other, rounding_mode=rounding_mode, out=out)
)SCRIPT"},
{"div__Tensor_0_3", R"SCRIPT(
{"div__Tensor_0_3", R"SCRIPT(
def div__Tensor_0_3(self: Tensor, other: Tensor) -> Tensor:
if (self.is_floating_point() or other.is_floating_point()):
return self.true_divide_(other)
return self.divide_(other, rounding_mode='trunc')
)SCRIPT"},
{"div__Tensor_mode_0_3", R"SCRIPT(
{"div__Tensor_mode_0_3", R"SCRIPT(
def div__Tensor_mode_0_3(self: Tensor, other: Tensor, *, rounding_mode: Optional[str]=None) -> Tensor:
return self.divide_(other, rounding_mode=rounding_mode)
)SCRIPT"},
{"div__Scalar_0_3", R"SCRIPT(
{"div__Scalar_0_3", R"SCRIPT(
def div__Scalar_0_3(self: Tensor, other: number) -> Tensor:
if (self.is_floating_point() or isinstance(other, float)):
return self.true_divide_(other)
return self.divide_(other, rounding_mode='trunc')
)SCRIPT"},
{"div__Scalar_mode_0_3", R"SCRIPT(
{"div__Scalar_mode_0_3", R"SCRIPT(
def div__Scalar_mode_0_3(self: Tensor, other: number, *, rounding_mode: Optional[str]=None) -> Tensor:
return self.divide_(other, rounding_mode=rounding_mode)
)SCRIPT"},
{"full_names_0_4", R"SCRIPT(
{"full_names_0_4", R"SCRIPT(
def full_names_0_4(size:List[int], fill_value:number, *, names:Optional[List[str]]=None,
dtype:Optional[int]=None, layout:Optional[int]=None, device:Optional[Device]=None,
pin_memory:Optional[bool]=None) -> Tensor:
return torch.full(size, fill_value, names=names, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
)SCRIPT"},
{"full_0_4", R"SCRIPT(
{"full_0_4", R"SCRIPT(
def full_0_4(size:List[int], fill_value:number, *, dtype:Optional[int]=None,
layout:Optional[int]=None, device:Optional[Device]=None,
pin_memory:Optional[bool]=None) -> Tensor:
@@ -105,19 +105,30 @@ def full_0_4(size:List[int], fill_value:number, *, dtype:Optional[int]=None,
fill_value = float(fill_value)
return torch.full(size, fill_value, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
)SCRIPT"},
{"full_out_0_4", R"SCRIPT(
{"full_out_0_4", R"SCRIPT(
def full_out_0_4(size:List[int], fill_value:number, *, out:Tensor) -> Tensor:
return torch.full(size, fill_value, out=out)
)SCRIPT"},
{"gelu_0_9", R"SCRIPT(
{"gelu_0_9", R"SCRIPT(
def gelu_0_9(self: Tensor) -> Tensor:
return torch.gelu(self, approximate='none')
)SCRIPT"},
{"gelu_out_0_9", R"SCRIPT(
{"gelu_out_0_9", R"SCRIPT(
def gelu_out_0_9(self: Tensor, *, out: Tensor) -> Tensor:
return torch.gelu(self, approximate='none', out=out)
)SCRIPT"},
});
{"random__from_0_10", R"SCRIPT(
def random__from_0_10(self: Tensor, from: int, to: Optional[int], *, generator: None = None) -> Tensor:
return torch.random_(self, from, to, generator=generator)
)SCRIPT"},
{"random_from_0_10", R"SCRIPT(
def random_from_0_10(self: Tensor, from: int, to: Optional[int], *, generator: None = None) -> Tensor:
return torch.random(self, from, to, generator=generator)
)SCRIPT"},
{"random_from_out_0_10", R"SCRIPT(
def random_from_out_0_10(self: Tensor, from: int, to: Optional[int], *, generator: None = None, out: Tensor) -> Tensor:
return torch.random(self, from, to, generator=generator, out=out)
)SCRIPT"}});
std::shared_ptr<Graph> create_upgrader_graph(
const std::string& upgrader_name,

View File

@@ -15,80 +15,93 @@ static bool isVersionMapSorted = false;
// Main entry point for all operators that have valid upgraders.
// Note for developers: The list of upgraders should be SORTED
// by the version number where the upgrader is registered.
static std::unordered_map<std::string, std::vector<UpgraderEntry>> operatorVersionMap(
{{"aten::logspace",
{{9,
"logspace_0_8",
"aten::logspace(Scalar start, Scalar end, int? steps=None, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::logspace.out",
{{9,
"logspace_out_0_8",
"aten::logspace.out(Scalar start, Scalar end, int? steps=None, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::linspace",
{{8,
"linspace_0_7",
"aten::linspace(Scalar start, Scalar end, int? steps=None, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::linspace.out",
{{8,
"linspace_out_0_7",
"aten::linspace.out(Scalar start, Scalar end, int? steps=None, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::div.Tensor",
{{4,
"div_Tensor_0_3",
"aten::div.Tensor(Tensor self, Tensor other) -> Tensor"}}},
{"aten::div.Tensor_mode",
{{4,
"div_Tensor_mode_0_3",
"aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"}}},
{"aten::div.Scalar",
{{4,
"div_Scalar_0_3",
"aten::div.Scalar(Tensor self, Scalar other) -> Tensor"}}},
{"aten::div.Scalar_mode",
{{4,
"div_Scalar_mode_0_3",
"aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"}}},
{"aten::div.out",
{{4,
"div_out_0_3",
"aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::div.out_mode",
{{4,
"div_out_mode_0_3",
"aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::div_.Tensor",
{{4,
"div__Tensor_0_3",
"aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"}}},
{"aten::div_.Tensor_mode",
{{4,
"div__Tensor_mode_0_3",
"aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"}}},
{"aten::div_.Scalar",
{{4,
"div__Scalar_0_3",
"aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"}}},
{"aten::div_.Scalar_mode",
{{4,
"div__Scalar_mode_0_3",
"aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"}}},
{"aten::full",
{{5,
"full_0_4",
"aten::full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::full.names",
{{5,
"full_names_0_4",
"aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::full.out",
{{5,
"full_out_0_4",
"aten::full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::gelu", {{10, "gelu_0_9", "aten::gelu(Tensor self) -> Tensor"}}},
{"aten::gelu.out",
{{10,
"gelu_out_0_9",
"aten::gelu.out(Tensor self, *, Tensor(a!) out) -> Tensor"}}}});
static std::unordered_map<std::string, std::vector<UpgraderEntry>> operatorVersionMap({
{"aten::logspace",
{{9,
"logspace_0_8",
"aten::logspace(Scalar start, Scalar end, int? steps=None, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::logspace.out",
{{9,
"logspace_out_0_8",
"aten::logspace.out(Scalar start, Scalar end, int? steps=None, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::linspace",
{{8,
"linspace_0_7",
"aten::linspace(Scalar start, Scalar end, int? steps=None, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::linspace.out",
{{8,
"linspace_out_0_7",
"aten::linspace.out(Scalar start, Scalar end, int? steps=None, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::div.Tensor",
{{4,
"div_Tensor_0_3",
"aten::div.Tensor(Tensor self, Tensor other) -> Tensor"}}},
{"aten::div.Tensor_mode",
{{4,
"div_Tensor_mode_0_3",
"aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"}}},
{"aten::div.Scalar",
{{4,
"div_Scalar_0_3",
"aten::div.Scalar(Tensor self, Scalar other) -> Tensor"}}},
{"aten::div.Scalar_mode",
{{4,
"div_Scalar_mode_0_3",
"aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"}}},
{"aten::div.out",
{{4,
"div_out_0_3",
"aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::div.out_mode",
{{4,
"div_out_mode_0_3",
"aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::div_.Tensor",
{{4,
"div__Tensor_0_3",
"aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"}}},
{"aten::div_.Tensor_mode",
{{4,
"div__Tensor_mode_0_3",
"aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)"}}},
{"aten::div_.Scalar",
{{4,
"div__Scalar_0_3",
"aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"}}},
{"aten::div_.Scalar_mode",
{{4,
"div__Scalar_mode_0_3",
"aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)"}}},
{"aten::full",
{{5,
"full_0_4",
"aten::full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::full.names",
{{5,
"full_names_0_4",
"aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"}}},
{"aten::full.out",
{{5,
"full_out_0_4",
"aten::full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"}}},
{"aten::gelu", {{10, "gelu_0_9", "aten::gelu(Tensor self) -> Tensor"}}},
{"aten::gelu.out",
{{10,
"gelu_out_0_9",
"aten::gelu.out(Tensor self, *, Tensor(a!) out) -> Tensor"}}},
{"aten::random_.from_int",
{{11,
"random__from_0_10",
"aten::random_.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor"}}},
{"aten::random.from_int",
{{11,
"random_from_0_10",
"aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor"}}},
{"aten::random.from_int_out",
{{11,
"random_from_out_0_10",
"aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor"}}},
});
const std::unordered_map<std::string, std::vector<UpgraderEntry>>&
get_operator_version_map() {