"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""

import time

import numpy as np

import torch

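# Input shapes to benchmark. Each shape is paired element-wise with the entry at the
# same index in `repeats` below.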
input_shapes = (
    (4, 4, 1),
    (16, 1, 32),
    (64, 64, 1, 1),
    (8, 256, 128),
    (1, 64, 128, 32),
    (512, 512),
)

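# Repeat factors applied to the matching input shape. `Tensor.repeat` accepts more
# repeat dimensions than the tensor has; the extra leading dimensions are treated as 1.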
repeats = (
    (1, 1, 1, 64),
    (1, 4, 1, 2),
    (1, 2, 2, 15),
    (1, 1, 3, 2),
    (128, 1, 8, 1),
    (1, 1, 2, 16),
)

NUM_WARMUP_ITERS = 5
NUM_BENCHMARK_ITERS = 10
# torch.randn produces float32 tensors, i.e. 4 bytes per element.
DTYPE_TO_BYTES = {"float": 4}


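# Traffic model for one pass over all (shape, repeat) pairs: each call reads the full
# input once and writes numel(input) * prod(repeat) output elements, so the byte count
# covers both the read and the write side.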
def generate_data_for_repeat():
    input_tensors = [torch.randn(*input_shape) for input_shape in input_shapes]
    total_num_elements = 0
    for input_tensor, repeat in zip(input_tensors, repeats):
        total_num_elements += input_tensor.numel()
        total_num_elements += input_tensor.numel() * np.prod(repeat)
    return input_tensors, (total_num_elements * DTYPE_TO_BYTES["float"])


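# Materialize the benchmark inputs once at import time so tensor creation is not part
# of the timed region.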
input_tensors, total_bytes = generate_data_for_repeat()
BYTES_TO_MB = 1.0 / 1000.0 / 1000.0  # decimal megabytes, matching the MB/s figure printed below


def pt_repeat(input_tensor, repeat):
    return input_tensor.repeat(repeat)


def pt_repeat_n_times(niters):
    for _ in range(niters):
        for input_tensor, repeat in zip(input_tensors, repeats):
            pt_repeat(input_tensor, repeat)


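# Benchmark driver: warm up, time NUM_BENCHMARK_ITERS passes over all pairs, and report
# the effective bandwidth implied by the byte count computed above.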
if __name__ == "__main__":
    # Warm up runs.
    pt_repeat_n_times(NUM_WARMUP_ITERS)
    s = time.time()
    pt_repeat_n_times(NUM_BENCHMARK_ITERS)
    total_time_s = time.time() - s
    total_time_per_iter_s = total_time_s / NUM_BENCHMARK_ITERS
    achieved_bandwidth = (total_bytes * BYTES_TO_MB) / total_time_per_iter_s
    print(f"Time: {total_time_per_iter_s} s, Achieved Bandwidth: {achieved_bandwidth} MB/s")
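# Example invocation (the file name is assumed here; the script takes no arguments):
#   python repeat_benchmark.py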