pytorch/benchmarks/operator_benchmark/pt/binary_test.py
Arash Pakbin f3ddc08ddc Additional operators in operator benchmark (#145625)
The list of added operators:
add_, addcmul, arange, baddbmm, bmm, clamp, div, div_, gelu, index_add, logical_and, mul_, sub_, topk, where

This pull request is the same as a previous one: https://github.com/pytorch/pytorch/pull/145121 which inadvertently got deleted while merging.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145625
Approved by: https://github.com/jeffdaily
2025-01-26 19:20:02 +00:00

206 lines
5.1 KiB
Python

"""Microbenchmarks for binary operators."""

# The docstring must be the first statement in the module to actually become
# the module docstring (PEP 257); it previously sat below the imports, where
# it was just a no-op string expression.
import operator_benchmark as op_bench
import torch

# Benchmark ops performance with broadcast
# Elementwise binary ops to benchmark with broadcasting inputs (configs below).
binary_ops_bcast_list = op_bench.op_list(
    attr_names=["op_name", "op_func"],
    attrs=[
        ["add", torch.add],
        ["sub", torch.sub],
        ["div", torch.div],
        ["mul", torch.mul],
    ],
)
# Configs with broadcast: the two shapes ([64, 1, 64] vs [1, 64, 1]) differ in
# every dimension, so the op must broadcast them against each other.
binary_configs_broadcast = op_bench.config_list(
    attr_names=["in_one", "in_two"],
    attrs=[
        [[64, 1, 64], [1, 64, 1]],
    ],
    cross_product_configs={
        "device": ["cpu"],
        "dtype": [torch.float],
    },
    tags=["short"],
)
class BinaryOpBcastBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark a binary op applied to two broadcast-compatible random tensors."""

    def init(self, in_one, in_two, dtype, device, op_func):
        # Draw float noise on the target device, then cast to the benchmark dtype.
        lhs = torch.randn(in_one, device=device).to(dtype=dtype)
        rhs = torch.randn(in_two, device=device).to(dtype=dtype)
        self.inputs = {"in_one": lhs, "in_two": rhs}
        self.op_func = op_func

    def forward(self, in_one, in_two):
        # The op itself performs the broadcasting.
        return self.op_func(in_one, in_two)
# Instantiate one benchmark per (op, config) pair for the broadcast case.
op_bench.generate_pt_tests_from_op_list(
    binary_ops_bcast_list, binary_configs_broadcast, BinaryOpBcastBenchmark
)
# Benchmark ops performance without broadcast: the same elementwise ops, run on
# identically shaped (M, N, K) inputs.
binary_ops_list = op_bench.op_list(
    attr_names=["op_name", "op_func"],
    attrs=[
        ["add", torch.add],
        ["sub", torch.sub],
        ["div", torch.div],
        ["mul", torch.mul],
    ],
)
# Short configs: a few fixed (M, N, K) shapes, int32 inputs, on CPU and CUDA.
binary_short_configs = op_bench.config_list(
    attr_names=["M", "N", "K"],
    attrs=[
        [1, 1, 1],
        [64, 64, 64],
        [64, 64, 128],
    ],
    cross_product_configs={
        "device": ["cpu", "cuda"],
        "dtype_one": [torch.int32],
        "dtype_two": [torch.int32],
    },
    tags=["short"],
)
# Long configs: full cross product of shapes, devices, and int8/int32 dtypes
# (the two inputs may have different dtypes).
binary_long_configs = op_bench.cross_product_configs(
    M=[8, 128],
    N=[32, 64],
    K=[256, 512],
    device=["cpu", "cuda"],
    dtype_one=[torch.int8, torch.int32],
    dtype_two=[torch.int8, torch.int32],
    tags=["long"],
)
class BinaryOpBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark a binary op on two same-shaped (M, N, K) random tensors.

    The inputs may use different dtypes (dtype_one / dtype_two); each tensor is
    generated as float noise and then cast to its target dtype.
    """

    def init(self, M, N, K, device, dtype_one, dtype_two, op_func):
        shape = (M, N, K)
        first = torch.randn(shape, device=device).to(dtype=dtype_one)
        second = torch.randn(shape, device=device).to(dtype=dtype_two)
        self.inputs = {"input_one": first, "input_two": second}
        self.op_func = op_func

    def forward(self, input_one, input_two):
        return self.op_func(input_one, input_two)
# Instantiate both the short and long non-broadcast benchmarks.
op_bench.generate_pt_tests_from_op_list(
    binary_ops_list, binary_short_configs + binary_long_configs, BinaryOpBenchmark
)
######
# Benchmark ops performance for boolean dtype
######
# Benchmark ops performance with broadcast.
# NOTE: this rebinds binary_ops_bcast_list; the float/int tests above were
# already generated from the previous value, so the rebinding is safe.
binary_ops_bcast_list = op_bench.op_list(
    attr_names=["op_name", "op_func"],
    attrs=[["logical_and", torch.logical_and]],
)
# Configs with broadcast for the boolean ops: same broadcasting shapes as the
# numeric case, but with torch.bool inputs.
binary_configs_broadcast = op_bench.config_list(
    attr_names=["in_one", "in_two"],
    attrs=[
        [[64, 1, 64], [1, 64, 1]],
    ],
    cross_product_configs={
        "device": ["cpu"],
        "dtype": [torch.bool],
    },
    tags=["short"],
)
class BinaryOpBcastBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark a boolean binary op on two broadcast-compatible random masks.

    NOTE(review): this redefines the BinaryOpBcastBenchmark class used for the
    numeric ops above; that is safe only because those tests were already
    generated from the earlier definition.
    """

    def init(self, in_one, in_two, dtype, device, op_func):
        def random_mask(shape):
            # ~50% ones via bernoulli, then cast (torch.bool in the configs).
            return torch.bernoulli(0.5 * torch.ones(shape, device=device)).to(
                dtype=dtype
            )

        self.inputs = {"in_one": random_mask(in_one), "in_two": random_mask(in_two)}
        self.op_func = op_func

    def forward(self, in_one, in_two):
        return self.op_func(in_one, in_two)
# Instantiate the boolean broadcast benchmarks.
op_bench.generate_pt_tests_from_op_list(
    binary_ops_bcast_list, binary_configs_broadcast, BinaryOpBcastBenchmark
)
# Benchmark ops performance without broadcast (boolean dtype).
# NOTE: rebinds binary_ops_list; the numeric tests above were already generated.
binary_ops_list = op_bench.op_list(
    attr_names=["op_name", "op_func"],
    attrs=[["logical_and", torch.logical_and]],
)
# Short configs for the boolean ops: fixed shapes, torch.bool inputs.
binary_short_configs = op_bench.config_list(
    attr_names=["M", "N", "K"],
    attrs=[
        [1, 1, 1],
        [64, 64, 64],
        [64, 64, 128],
    ],
    cross_product_configs={
        "device": ["cpu", "cuda"],
        "dtype_one": [torch.bool],
        "dtype_two": [torch.bool],
    },
    tags=["short"],
)
# Long configs for the boolean ops: full cross product of shapes and devices.
# torch.bool is the only dtype, so it is listed once per axis; the original
# listed it twice per axis ([torch.bool, torch.bool]), which made
# cross_product_configs emit every configuration 4 times (duplicate benchmarks).
binary_long_configs = op_bench.cross_product_configs(
    M=[8, 128],
    N=[32, 64],
    K=[256, 512],
    device=["cpu", "cuda"],
    dtype_one=[torch.bool],
    dtype_two=[torch.bool],
    tags=["long"],
)
class BinaryOpBenchmark(op_bench.TorchBenchmarkBase):
    """Benchmark a boolean binary op on two same-shaped (M, N, K) random masks.

    NOTE(review): this redefines the BinaryOpBenchmark class used for the
    numeric ops above; that is safe only because those tests were already
    generated from the earlier definition.
    """

    def init(self, M, N, K, device, dtype_one, dtype_two, op_func):
        def random_mask(dtype):
            # ~50% ones via bernoulli, then cast (torch.bool in the configs).
            dense = 0.5 * torch.ones(M, N, K, device=device)
            return torch.bernoulli(dense).to(dtype=dtype)

        self.inputs = {
            "input_one": random_mask(dtype_one),
            "input_two": random_mask(dtype_two),
        }
        self.op_func = op_func

    def forward(self, input_one, input_two):
        return self.op_func(input_one, input_two)
# Instantiate both the short and long boolean non-broadcast benchmarks.
op_bench.generate_pt_tests_from_op_list(
    binary_ops_list, binary_short_configs + binary_long_configs, BinaryOpBenchmark
)
if __name__ == "__main__":
    # Run all benchmarks registered above when executed as a script.
    op_bench.benchmark_runner.main()