Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
[BE]: Apply FURB118 (prev): replaces unnecessary lambdas with operator. (#116027)
This replaces a number of unnecessary lambdas with the equivalent functions from the operator module. The result is semantically identical, but the operator functions are faster and arguably more readable. Once the FURB rules are taken out of preview, I will enable this rule as a ruff check.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/116027
Approved by: https://github.com/malfet
This commit is contained in:
parent 2d2016fdf8
commit 6de28e92d2
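As context for the change (this example is not part of the diff itself), a minimal sketch of the equivalence being applied; the shape tuple and the timing loop are illustrative assumptions rather than code from this commit:

import operator
import timeit
from functools import reduce

shape = (2, 3, 4)

# reduce with a lambda and reduce with operator.mul compute the same product.
assert reduce(lambda x, y: x * y, shape, 1) == reduce(operator.mul, shape, 1) == 24

# The less obvious replacements used in this diff:
#   lambda a, b: a / b  ->  operator.truediv
#   lambda x, y: x % y  ->  operator.mod
#   lambda a: ~a        ->  operator.invert
#   lambda c: -c        ->  operator.neg
assert operator.truediv(3, 2) == 1.5
assert operator.mod(7, 3) == 1

# Rough check of the "faster" claim; exact numbers vary by machine and Python version.
t_lambda = timeit.timeit(lambda: reduce(lambda x, y: x * y, shape, 1), number=100_000)
t_operator = timeit.timeit(lambda: reduce(operator.mul, shape, 1), number=100_000)
print(f"lambda: {t_lambda:.3f}s  operator.mul: {t_operator:.3f}s")

On CPython the operator functions typically come out ahead because they are implemented in C and avoid a Python-level frame per call.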
@@ -1,3 +1,4 @@
+import operator
 import threading
 import time
 from functools import reduce

@@ -75,9 +76,7 @@ class AgentBase:
 batch (bool): Whether to process and respond to observer requests as a batch or 1 at a time
 """
 self.batch = batch
-self.policy = Policy(
-reduce((lambda x, y: x * y), state_size), nlayers, out_features
-)
+self.policy = Policy(reduce(operator.mul, state_size), nlayers, out_features)
 self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)

 self.batch_size = batch_size
@@ -1,4 +1,5 @@
 import itertools
+import operator

 import numpy as np
 import torch

@@ -262,9 +263,9 @@ class BroadcastBench(benchmark.Benchmark):

 def register_broadcast_ops():
 binary_op_list = [
-["mul", lambda a, b: a * b],
-["add", lambda a, b: a + b],
-["sub", lambda a, b: a - b],
+["mul", operator.mul],
+["add", operator.add],
+["sub", operator.sub],
 ["div", lambda a, b: a / (b + 1e-4)],
 [
 "pow",

@@ -1,4 +1,5 @@
 import itertools
+import operator

 import numpy as np
 import scipy.special

@@ -116,9 +117,9 @@ class ElementBench(benchmark.Benchmark):

 def register_element_ops():
 binary_op_list = [
-["mul", lambda a, b: a * b],
-["add", lambda a, b: a + b],
-["sub", lambda a, b: a - b],
+["mul", operator.mul],
+["add", operator.add],
+["sub", operator.sub],
 ["div", lambda a, b: a / (b + 1e-4)],
 [
 "pow",
@@ -1,4 +1,5 @@
 import argparse
+import operator
 import time

 import matplotlib.pyplot as plt

@@ -105,10 +106,10 @@ def gen_float_comparison_tensors(N, M):

 te_bool = te.Dtype.Bool
 binary_ops = [
-("add", (lambda a, b: a + b), torch.add),
-("mul", (lambda a, b: a * b), torch.mul),
-("sub", (lambda a, b: a - b), torch.sub),
-("div", (lambda a, b: a / b), torch.div),
+("add", operator.add, torch.add),
+("mul", operator.mul, torch.mul),
+("sub", operator.sub, torch.sub),
+("div", operator.truediv, torch.div),
 (
 "eq",
 (lambda a, b: te.Cast.make(te_bool, a == b)),
@@ -25,6 +25,7 @@ from torch.testing._internal.common_dtype import floating_types_and
 import torch.nn.functional as F
 import torch.nn as nn
 from torch.autograd import gradcheck, gradgradcheck
+import operator


 class TestAvgPool(TestCase):

@@ -42,11 +43,11 @@ class TestAvgPool(TestCase):
 return joined_x.view(1, joined_x.numel())

 def _avg_pool2d(self, x, kernel_size):
-size = reduce((lambda x, y: x * y), kernel_size)
+size = reduce(operator.mul, kernel_size)
 return self._sum_pool2d(x, kernel_size) / size

 def _avg_pool3d(self, x, kernel_size):
-size = reduce((lambda x, y: x * y), kernel_size)
+size = reduce(operator.mul, kernel_size)
 return self._sum_pool3d(x, kernel_size) / size

 def test_doubletensor_avg_pool2d(self):

@@ -10,6 +10,7 @@ import glob
 import inspect
 import io
 import itertools
+import operator
 import os
 import shutil
 import tempfile
@@ -172,27 +173,27 @@ class TestOperators(common_utils.TestCase):
 def test_add_broadcast(self):
 x = torch.randn(2, 3, requires_grad=True).double()
 y = torch.randn(3, requires_grad=True).double()
-self.assertONNX(lambda x, y: x + y, (x, y))
+self.assertONNX(operator.add, (x, y))

 def test_add_left_broadcast(self):
 x = torch.randn(3, requires_grad=True).double()
 y = torch.randn(2, 3, requires_grad=True).double()
-self.assertONNX(lambda x, y: x + y, (x, y))
+self.assertONNX(operator.add, (x, y))

 def test_add_size1_broadcast(self):
 x = torch.randn(2, 3, requires_grad=True).double()
 y = torch.randn(2, 1, requires_grad=True).double()
-self.assertONNX(lambda x, y: x + y, (x, y))
+self.assertONNX(operator.add, (x, y))

 def test_add_size1_right_broadcast(self):
 x = torch.randn(2, 3, requires_grad=True).double()
 y = torch.randn(3, requires_grad=True).double()
-self.assertONNX(lambda x, y: x + y, (x, y))
+self.assertONNX(operator.add, (x, y))

 def test_add_size1_singleton_broadcast(self):
 x = torch.randn(2, 3, requires_grad=True).double()
 y = torch.randn(1, 3, requires_grad=True).double()
-self.assertONNX(lambda x, y: x + y, (x, y))
+self.assertONNX(operator.add, (x, y))

 def test_rsub(self):
 x = torch.randn(2, 3, requires_grad=True).double()

@@ -541,27 +542,27 @@ class TestOperators(common_utils.TestCase):
 def test_equal(self):
 x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
 y = torch.randn(1, 4, requires_grad=False).int()
-self.assertONNX(lambda x, y: x == y, (x, y))
+self.assertONNX(operator.eq, (x, y))

 def test_lt(self):
 x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
 y = torch.randn(1, 4, requires_grad=False).int()
-self.assertONNX(lambda x, y: x < y, (x, y))
+self.assertONNX(operator.lt, (x, y))

 def test_gt(self):
 x = torch.randn(1, 2, 3, 1, requires_grad=False).int()
 y = torch.randn(1, 4, requires_grad=False).int()
-self.assertONNX(lambda x, y: x > y, (x, y))
+self.assertONNX(operator.gt, (x, y))

 def test_le(self):
 x = torch.randn(3, 4, requires_grad=False).int()
 y = torch.randn(3, 4, requires_grad=False).int()
-self.assertONNX(lambda x, y: x <= y, (x, y))
+self.assertONNX(operator.le, (x, y))

 def test_ge(self):
 x = torch.randn(3, 4, requires_grad=False).int()
 y = torch.randn(3, 4, requires_grad=False).int()
-self.assertONNX(lambda x, y: x >= y, (x, y))
+self.assertONNX(operator.ge, (x, y))

 def test_exp(self):
 x = torch.randn(3, 4, requires_grad=True)

@@ -862,7 +863,7 @@ class TestOperators(common_utils.TestCase):
 def test_master_opset(self):
 x = torch.randn(2, 3).float()
 y = torch.randn(2, 3).float()
-self.assertONNX(lambda x, y: x + y, (x, y), opset_version=10)
+self.assertONNX(operator.add, (x, y), opset_version=10)

 def test_std(self):
 x = torch.randn(2, 3, 4).float()
@@ -2980,17 +2980,17 @@ class TestBinaryUfuncs(TestCase):
 @onlyCPU
 @dtypes(torch.float)
 def test_cdiv(self, device, dtype):
-self._test_cop(torch.div, lambda x, y: x / y, dtype, device)
+self._test_cop(torch.div, operator.truediv, dtype, device)

 @onlyCPU
 @dtypes(torch.float)
 def test_cremainder(self, device, dtype):
-self._test_cop(torch.remainder, lambda x, y: x % y, dtype, device)
+self._test_cop(torch.remainder, operator.mod, dtype, device)

 @onlyCPU
 @dtypes(torch.float)
 def test_cmul(self, device, dtype):
-self._test_cop(torch.mul, lambda x, y: x * y, dtype, device)
+self._test_cop(torch.mul, operator.mul, dtype, device)

 @onlyCPU
 @dtypes(torch.float)

@@ -63,6 +63,7 @@ from torch.utils.data.datapipes.utils.snapshot import (
 from torch.utils.data.datapipes.dataframe import CaptureDataFrame
 from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
 from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
+import operator

 try:
 import dill
@@ -1361,8 +1362,8 @@ class TestFunctionalIterDataPipe(TestCase):
 # Unmatched input columns with fn arguments
 _helper(None, fn_n1, 1, error=ValueError)
 _helper(None, fn_n1, [0, 1, 2], error=ValueError)
-_helper(None, lambda d0, d1: d0 + d1, 0, error=ValueError)
-_helper(None, lambda d0, d1: d0 + d1, [0, 1, 2], error=ValueError)
+_helper(None, operator.add, 0, error=ValueError)
+_helper(None, operator.add, [0, 1, 2], error=ValueError)
 _helper(None, fn_cmplx, 0, 1, ValueError)
 _helper(None, fn_n1_pos, 1, error=ValueError)
 _helper(None, fn_n1_def, [0, 1, 2], 1, error=ValueError)

@@ -16,6 +16,7 @@ from torch.testing._internal.common_utils import (
 from torch.testing._internal.common_device_type import (
 instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA,
 onlyNativeDeviceTypes, skipXLA)
+import operator


 class TestIndexing(TestCase):

@@ -138,7 +139,7 @@ class TestIndexing(TestCase):
 def consec(size, start=1):
 # Creates the sequence in float since CPU half doesn't support the
 # needed operations. Converts to dtype before returning.
-numel = reduce(lambda x, y: x * y, size, 1)
+numel = reduce(operator.mul, size, 1)
 sequence = torch.ones(numel, dtype=torch.float, device=device).cumsum(0)
 sequence.add_(start - 1)
 return sequence.view(*size).to(dtype=dtype)
@@ -33,6 +33,7 @@ from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, SM90Or
 _get_torch_cuda_version
 from torch.distributions.binomial import Binomial
 import torch.backends.opt_einsum as opt_einsum
+import operator

 # Protects against includes accidentally setting the default dtype
 assert torch.get_default_dtype() is torch.float32

@@ -7008,7 +7009,7 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
 # mat_chars denotes matrix characteristics
 # possible values are: sym, sym_psd, sym_pd, sing, non_sym
 def run_test(matsize, batchdims, mat_chars):
-num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
+num_matrices = reduce(operator.mul, batchdims, 1)
 list_of_matrices = []

 for idx in range(num_matrices):

@@ -44,6 +44,7 @@ import numpy as np
 import torch
 import torch.utils._pytree as pytree
 from itertools import product
+import operator

 test_consistency_op_db = copy.deepcopy(op_db)
 test_error_inputs_op_db = copy.deepcopy(op_db)
@@ -1388,11 +1389,11 @@ class TestAvgPool(TestCaseMPS):
 return joined_x.view(1, joined_x.numel())

 def _avg_pool2d(self, x, kernel_size):
-size = reduce((lambda x, y: x * y), kernel_size)
+size = reduce(operator.mul, kernel_size)
 return self._sum_pool2d(x, kernel_size) / size

 def _avg_pool3d(self, x, kernel_size):
-size = reduce((lambda x, y: x * y), kernel_size)
+size = reduce(operator.mul, kernel_size)
 return self._sum_pool3d(x, kernel_size) / size

 def test_avg_pool2d_with_zero_divisor(self):

@@ -22,6 +22,7 @@ from torch.testing._internal.common_dtype import (
 all_types_and_complex, floating_and_complex_types_and)
 from torch.testing._internal.opinfo.definitions.sparse import validate_sample_input_sparse
 from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
+import operator

 if TEST_SCIPY:
 import scipy.sparse as sp

@@ -3310,7 +3311,7 @@ class TestSparseCSR(TestCase):

 # random bool vector w/ length equal to max possible nnz for the sparse_shape
 mask_source = make_tensor(batch_mask_shape, dtype=torch.bool, device=device).flatten()
-n_batch = functools.reduce(lambda x, y: x * y, batch_shape, 1)
+n_batch = functools.reduce(operator.mul, batch_shape, 1)

 # stack random permutations of the source for each batch
 mask = torch.stack([mask_source[torch.randperm(mask_source.numel())]

@@ -29,6 +29,7 @@ from torch.testing._internal import opinfo
 from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
 from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
 from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
+import operator

 # For testing TestCase methods and torch.testing functions
 class TestTesting(TestCase):
@@ -1427,7 +1428,7 @@ class TestMakeTensor(TestCase):
 @parametrize("noncontiguous", [False, True])
 @parametrize("shape", [tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
 def test_noncontiguous(self, dtype, device, noncontiguous, shape):
-numel = functools.reduce(lambda a, b: a * b, shape, 1)
+numel = functools.reduce(operator.mul, shape, 1)

 t = torch.testing.make_tensor(shape, dtype=dtype, device=device, noncontiguous=noncontiguous)
 self.assertEqual(t.is_contiguous(), not noncontiguous or numel < 2)

@@ -18,6 +18,7 @@ from torch.testing._internal.common_dtype import (


 import numpy as np
+import operator

 # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings
@@ -550,37 +551,37 @@ class TestTypePromotion(TestCase):
 name="lt",
 out_op=lambda x, y, d: torch.lt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
 ret_op=lambda x, y: torch.lt(x, y),
-compare_op=lambda x, y: x < y,
+compare_op=operator.lt,
 ),
 dict(
 name="le",
 out_op=lambda x, y, d: torch.le(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
 ret_op=lambda x, y: torch.le(x, y),
-compare_op=lambda x, y: x <= y,
+compare_op=operator.le,
 ),
 dict(
 name="gt",
 out_op=lambda x, y, d: torch.gt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
 ret_op=lambda x, y: torch.gt(x, y),
-compare_op=lambda x, y: x > y,
+compare_op=operator.gt,
 ),
 dict(
 name="ge",
 out_op=lambda x, y, d: torch.ge(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
 ret_op=lambda x, y: torch.ge(x, y),
-compare_op=lambda x, y: x >= y,
+compare_op=operator.ge,
 ),
 dict(
 name="eq",
 out_op=lambda x, y, d: torch.eq(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
 ret_op=lambda x, y: torch.eq(x, y),
-compare_op=lambda x, y: x == y,
+compare_op=operator.eq,
 ),
 dict(
 name="ne",
 out_op=lambda x, y, d: torch.ne(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
 ret_op=lambda x, y: torch.ne(x, y),
-compare_op=lambda x, y: x != y,
+compare_op=operator.ne,
 ),
 ]
 for op in comparison_ops:

@@ -627,12 +628,12 @@ class TestTypePromotion(TestCase):
 @onlyNativeDeviceTypes
 def test_complex_assertraises(self, device):
 comparison_ops = [
-dict(name="lt", compare_op=lambda x, y: x < y, ),
-dict(name="le", compare_op=lambda x, y: x <= y, ),
-dict(name="gt", compare_op=lambda x, y: x > y, ),
-dict(name="ge", compare_op=lambda x, y: x >= y, ),
-dict(name="eq", compare_op=lambda x, y: x == y, ),
-dict(name="ne", compare_op=lambda x, y: x != y, ),
+dict(name="lt", compare_op=operator.lt, ),
+dict(name="le", compare_op=operator.le, ),
+dict(name="gt", compare_op=operator.gt, ),
+dict(name="ge", compare_op=operator.ge, ),
+dict(name="eq", compare_op=operator.eq, ),
+dict(name="ne", compare_op=operator.ne, ),
 ]
 for op in comparison_ops:
 is_cuda = torch.device(device).type == 'cuda'
@@ -15,6 +15,7 @@ import pytest
 IS_WASM = False
 HAS_REFCOUNT = True

+import operator
 from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest

 from hypothesis import given, strategies as st

@@ -701,25 +702,19 @@ class TestFloatExceptions(TestCase):
 # The value of tiny for double double is NaN, so we need to
 # pass the assert
 if not np.isnan(ft_tiny):
-self.assert_raises_fpe(underflow, lambda a, b: a / b, ft_tiny, ft_max)
-self.assert_raises_fpe(underflow, lambda a, b: a * b, ft_tiny, ft_tiny)
-self.assert_raises_fpe(overflow, lambda a, b: a * b, ft_max, ftype(2))
-self.assert_raises_fpe(overflow, lambda a, b: a / b, ft_max, ftype(0.5))
-self.assert_raises_fpe(overflow, lambda a, b: a + b, ft_max, ft_max * ft_eps)
-self.assert_raises_fpe(overflow, lambda a, b: a - b, -ft_max, ft_max * ft_eps)
+self.assert_raises_fpe(underflow, operator.truediv, ft_tiny, ft_max)
+self.assert_raises_fpe(underflow, operator.mul, ft_tiny, ft_tiny)
+self.assert_raises_fpe(overflow, operator.mul, ft_max, ftype(2))
+self.assert_raises_fpe(overflow, operator.truediv, ft_max, ftype(0.5))
+self.assert_raises_fpe(overflow, operator.add, ft_max, ft_max * ft_eps)
+self.assert_raises_fpe(overflow, operator.sub, -ft_max, ft_max * ft_eps)
 self.assert_raises_fpe(overflow, np.power, ftype(2), ftype(2**fi.nexp))
-self.assert_raises_fpe(divbyzero, lambda a, b: a / b, ftype(1), ftype(0))
-self.assert_raises_fpe(
-invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf)
-)
-self.assert_raises_fpe(invalid, lambda a, b: a / b, ftype(0), ftype(0))
-self.assert_raises_fpe(
-invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf)
-)
-self.assert_raises_fpe(
-invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf)
-)
-self.assert_raises_fpe(invalid, lambda a, b: a * b, ftype(0), ftype(np.inf))
+self.assert_raises_fpe(divbyzero, operator.truediv, ftype(1), ftype(0))
+self.assert_raises_fpe(invalid, operator.truediv, ftype(np.inf), ftype(np.inf))
+self.assert_raises_fpe(invalid, operator.truediv, ftype(0), ftype(0))
+self.assert_raises_fpe(invalid, operator.sub, ftype(np.inf), ftype(np.inf))
+self.assert_raises_fpe(invalid, operator.add, ftype(np.inf), ftype(-np.inf))
+self.assert_raises_fpe(invalid, operator.mul, ftype(0), ftype(np.inf))

 @skipif(IS_WASM, reason="no wasm fp exception support")
 def test_warnings(self):
@@ -1,3 +1,4 @@
+import operator
 import sys
 from abc import abstractmethod
 from copy import copy

@@ -179,14 +180,10 @@ class TestPrioritizations:
 self._test_priorities[new_relevance.value].append(upgraded_tests)

 def set_test_relevance(self, test_run: TestRun, new_relevance: Relevance) -> None:
-return self._update_test_relevance(
-test_run, new_relevance, lambda curr, new: curr == new
-)
+return self._update_test_relevance(test_run, new_relevance, operator.eq)

 def raise_test_relevance(self, test_run: TestRun, new_relevance: Relevance) -> None:
-return self._update_test_relevance(
-test_run, new_relevance, lambda curr, new: curr >= new
-)
+return self._update_test_relevance(test_run, new_relevance, operator.ge)

 def validate_test_priorities(self) -> None:
 # Union all TestRuns that contain include/exclude pairs
@@ -680,7 +680,7 @@ if torch._C._has_mkldnn:
 :-1
 ] == torch.Size(reshape_2[:-1])
 can_remove_reshape = can_remove_reshape and (
-reduce(lambda x, y: x * y, reshape_2[:-1]) == reshape_1[0]
+reduce(operator.mul, reshape_2[:-1]) == reshape_1[0]
 )

 if can_remove_reshape:

@@ -4,6 +4,7 @@ from functools import reduce
 from typing import Any, List, Optional, Tuple

 from .base_data_sparsifier import BaseDataSparsifier
+import operator

 __all__ = ['DataNormSparsifier']

@@ -35,7 +36,7 @@ class DataNormSparsifier(BaseDataSparsifier):
 sparse_block_shape: Tuple[int, int] = (1, 4),
 zeros_per_block: Optional[int] = None, norm: str = 'L1'):
 if zeros_per_block is None:
-zeros_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+zeros_per_block = reduce(operator.mul, sparse_block_shape)

 assert norm in ['L1', 'L2'], "only L1 and L2 norm supported at the moment"

@@ -95,7 +96,7 @@ class DataNormSparsifier(BaseDataSparsifier):
 data_norm = F.avg_pool2d(data[None, None, :], kernel_size=sparse_block_shape,
 stride=sparse_block_shape, ceil_mode=True)

-values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+values_per_block = reduce(operator.mul, sparse_block_shape)

 data_norm = data_norm.flatten()
 num_blocks = len(data_norm)

@@ -116,7 +117,7 @@ class DataNormSparsifier(BaseDataSparsifier):
 def update_mask(self, name, data, sparsity_level,
 sparse_block_shape, zeros_per_block, **kwargs):

-values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+values_per_block = reduce(operator.mul, sparse_block_shape)
 if zeros_per_block > values_per_block:
 raise ValueError("Number of zeros per block cannot be more than "
 "the total number of elements in that block.")
@@ -5,6 +5,7 @@ import torch
 import torch.nn.functional as F

 from .base_sparsifier import BaseSparsifier
+import operator

 __all__ = ["WeightNormSparsifier"]

@@ -56,7 +57,7 @@ class WeightNormSparsifier(BaseSparsifier):
 zeros_per_block: Optional[int] = None,
 norm: Optional[Union[Callable, int]] = None):
 if zeros_per_block is None:
-zeros_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+zeros_per_block = reduce(operator.mul, sparse_block_shape)
 defaults = {
 "sparsity_level": sparsity_level,
 "sparse_block_shape": sparse_block_shape,

@@ -108,7 +109,7 @@ class WeightNormSparsifier(BaseSparsifier):
 mask.data = torch.ones_like(mask)
 return mask

-values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+values_per_block = reduce(operator.mul, sparse_block_shape)
 if values_per_block > 1:
 # Reduce the data
 data = F.avg_pool2d(
@@ -145,7 +146,7 @@ class WeightNormSparsifier(BaseSparsifier):
 block_h, block_w = sparse_block_shape
 dh = (block_h - h % block_h) % block_h
 dw = (block_w - w % block_w) % block_w
-values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+values_per_block = reduce(operator.mul, sparse_block_shape)

 if mask is None:
 mask = torch.ones((h + dh, w + dw), device=data.device)

@@ -174,7 +175,7 @@ class WeightNormSparsifier(BaseSparsifier):

 def update_mask(self, module, tensor_name, sparsity_level, sparse_block_shape,
 zeros_per_block, **kwargs):
-values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
+values_per_block = reduce(operator.mul, sparse_block_shape)
 if zeros_per_block > values_per_block:
 raise ValueError(
 "Number of zeros per block cannot be more than the total number of elements in that block."

@@ -1,3 +1,4 @@
+import operator
 import warnings
 from functools import reduce

@@ -31,7 +32,7 @@ class Resize(Function):
 @staticmethod
 def forward(ctx, tensor, sizes):
 ctx.sizes = sizes
-ctx.numel = reduce(lambda x, y: x * y, sizes, 1)
+ctx.numel = reduce(operator.mul, sizes, 1)
 if tensor.numel() != ctx.numel:
 raise RuntimeError(
 (

@@ -1,3 +1,4 @@
+import operator
 from functools import reduce


@@ -38,8 +39,8 @@ def check_onnx_broadcast(dims1, dims2):
 supported = True
 len1 = len(dims1)
 len2 = len(dims2)
-numel1 = reduce(lambda x, y: x * y, dims1)
-numel2 = reduce(lambda x, y: x * y, dims2)
+numel1 = reduce(operator.mul, dims1)
+numel2 = reduce(operator.mul, dims2)
 if len1 < len2:
 broadcast = True
 if numel2 != 1:
@@ -2,6 +2,7 @@ import array
 import enum
 import functools
 import logging
+import operator
 import struct
 import sys
 from typing import List, NamedTuple, Optional, Tuple

@@ -1032,11 +1033,7 @@ class _NnapiSerializer:

 out_shape = (
 in_oper.shape[:start_dim]
-+ (
-functools.reduce(
-lambda x, y: x * y, in_oper.shape[start_dim : end_dim + 1]
-),
-)
++ (functools.reduce(operator.mul, in_oper.shape[start_dim : end_dim + 1]),)
 + in_oper.shape[end_dim + 1 :]
 )

@@ -45,6 +45,7 @@ from .utils import (
 )
 from torch.distributed.remote_device import _remote_device
 from torch.utils import _pytree as pytree
+import operator

 # Tracking for sharded tensor objects.
 _sharded_tensor_lock = threading.Lock()
@@ -394,7 +395,7 @@ class ShardedTensor(ShardedTensorBase):
 Default: ``None``
 """
 def shard_size(shard_md):
-return reduce((lambda x, y: x * y), shard_md.shard_sizes) # type: ignore[attr-defined]
+return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined]

 if enforce_dtype:
 warnings.warn("enforce_dtype is deprecated. Please use dtype instead.")

@@ -244,8 +244,8 @@ def reshape_inference_rule(n: Node):
 elif isinstance(t1, TensorType):
 assert isinstance(t1, TensorType)
 a = [e if e != Dyn else 1 for e in t1.__args__]
-p1 = reduce(lambda x, y: x * y, a)
-p2 = reduce(lambda x, y: x * y, t2)
+p1 = reduce(operator.mul, a)
+p2 = reduce(operator.mul, t2)
 if p1 % p2 == 0 or p2 % p1 == 0:
 n.type = t2_type
 return t2_type

@@ -498,7 +498,7 @@ def flatten_check(tensor_type, start_dim, end_dim):
 if Dyn in mid:
 mid = [Dyn]
 else:
-mid = [reduce(lambda x, y: x * y, my_args[start_dim:end_dim])]
+mid = [reduce(operator.mul, my_args[start_dim:end_dim])]
 new_type_list = lhs + mid + rhs
 return TensorType(tuple(new_type_list))
 else:
@@ -541,9 +541,9 @@ def _sympy_rshift(a, b):


 reflectable_magic_methods = {
-"add": lambda a, b: a + b,
-"sub": lambda a, b: a - b,
-"mul": lambda a, b: a * b,
+"add": operator.add,
+"sub": operator.sub,
+"mul": operator.mul,
 "mod": _sympy_mod,
 "pow": _sympy_pow,
 "and": _sympy_and,

@@ -676,7 +676,7 @@ def _sympy_is_integer(a):

 magic_methods = {
 **reflectable_magic_methods,
-"sym_not": lambda a: ~a,
+"sym_not": operator.invert,
 "eq": _sympy_eq,
 "ne": _sympy_ne,
 "gt": _sympy_gt,

@@ -686,7 +686,7 @@ magic_methods = {
 "floor": _sympy_floor,
 "sym_float": _sympy_sym_float,
 "ceil": _sympy_ceil,
-"neg": lambda a: -a,
+"neg": operator.neg,
 "sym_min": _sympy_min,
 "sym_max": _sympy_max,
 "sym_ite": _sympy_ite,
@@ -41,6 +41,7 @@ from torch.testing._internal.distributed.multi_threaded_pg import (
 _uninstall_threaded_pg,
 ProcessLocalGroup,
 )
+import operator

 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

@@ -426,7 +427,7 @@ def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):

 def compute_sum(fn, world_size: int):
 return reduce(
-lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]
+operator.add, [fn(rank, world_size) for rank in range(world_size)]
 )

 return [

@@ -18219,7 +18219,7 @@ op_db: List[OpInfo] = [
 }
 """,
 num_outputs=1),
-ref=lambda i0, i1: i0 + i1,
+ref=operator.add,
 dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
 sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42),
 supports_out=False,
@@ -84,6 +84,7 @@ from torch.testing._internal.common_utils import (
 import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer

 from torch.utils.data.distributed import DistributedSampler
+import operator

 try:
 import torchvision

@@ -2167,7 +2168,7 @@ class DistributedTest:
 dist.ReduceOp.PRODUCT,
 2,
 10,
-reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
+reduce(operator.mul, [10] * (len(group) - 1), 2),
 )

 @skip_but_pass_in_sandcastle_if(

@@ -2233,7 +2234,7 @@ class DistributedTest:
 dist.ReduceOp.PRODUCT,
 2,
 10,
-reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
+reduce(operator.mul, [10] * (len(group) - 1), 2),
 )

 @skip_but_pass_in_sandcastle_if(
@@ -2299,7 +2300,7 @@ class DistributedTest:
 dist.ReduceOp.PRODUCT,
 2,
 10,
-reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
+reduce(operator.mul, [10] * (len(group) - 1), 2),
 )

 @skip_but_pass_in_sandcastle_if(

@@ -2821,7 +2822,7 @@ class DistributedTest:
 dist.ReduceOp.PRODUCT,
 2,
 10,
-reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
+reduce(operator.mul, [10] * (len(group) - 1), 2),
 )

 @skip_but_pass_in_sandcastle_if(

@@ -2871,7 +2872,7 @@ class DistributedTest:
 dist.ReduceOp.PRODUCT,
 2,
 10,
-reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
+reduce(operator.mul, [10] * (len(group) - 1), 2),
 )

 @skip_if_small_worldsize

@@ -2921,7 +2922,7 @@ class DistributedTest:
 dist.ReduceOp.PRODUCT,
 2,
 10,
-reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
+reduce(operator.mul, [10] * (len(group) - 1), 2),
 )

 @skip_but_pass_in_sandcastle_if(
@@ -17,6 +17,7 @@ from typing import (
 import torch
 from torch.utils.benchmark.utils import common, cpp_jit
 from torch.utils.benchmark.utils._stubs import CallgrindModuleType
+import operator


 __all__ = ["FunctionCount", "FunctionCounts", "CallgrindStats", "CopyIfCallgrind"]

@@ -100,7 +101,7 @@ class FunctionCounts:
 self,
 other: "FunctionCounts",
 ) -> "FunctionCounts":
-return self._merge(other, lambda c: -c)
+return self._merge(other, operator.neg)

 def __mul__(self, other: Union[int, float]) -> "FunctionCounts":
 return self._from_dict({