mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
[Benchmarking] Run MPS benchmarks for [b]float16 (#151747)
And implicitly pass `--float32` when collecting results for the "notset" option. Speedups for some models are much higher for the float16 dtype, but it's important to track accuracy. Pull Request resolved: https://github.com/pytorch/pytorch/pull/151747 Approved by: https://github.com/Skylion007
This commit is contained in:
parent
ed511cd537
commit
9b74ea2490
|
|
@ -221,27 +221,39 @@ test_torchbench_smoketest() {
|
||||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||||
mkdir -p "$TEST_REPORTS_DIR"
|
mkdir -p "$TEST_REPORTS_DIR"
|
||||||
|
|
||||||
local dtype=notset
|
|
||||||
local device=mps
|
local device=mps
|
||||||
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152)
|
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152)
|
||||||
|
|
||||||
for backend in eager inductor; do
|
for backend in eager inductor; do
|
||||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
|
|
||||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
|
||||||
|
|
||||||
echo "Launching torchbench training performance run for backend ${backend}"
|
for dtype in notset float16 bfloat16; do
|
||||||
for model in "${models[@]}"; do
|
echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
|
||||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
local dtype_arg="--${dtype}"
|
||||||
--performance --only "$model" --backend "$backend" --training --devices "$device" \
|
if [ "$dtype" == notset ]; then
|
||||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
|
dtype_arg="--float32"
|
||||||
|
fi
|
||||||
|
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
||||||
|
for model in "${models[@]}"; do
|
||||||
|
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||||
|
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||||
|
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
|
||||||
|
done
|
||||||
done
|
done
|
||||||
|
|
||||||
echo "Launching torchbench inference performance run for backend ${backend}"
|
for dtype in notset amp; do
|
||||||
for model in "${models[@]}"; do
|
echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype}"
|
||||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
|
||||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" \
|
local dtype_arg="--${dtype}"
|
||||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
|
if [ "$dtype" == notset ]; then
|
||||||
|
dtype_arg="--float32"
|
||||||
|
fi
|
||||||
|
for model in "${models[@]}"; do
|
||||||
|
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||||
|
--performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
|
||||||
|
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
|
||||||
|
done
|
||||||
done
|
done
|
||||||
|
|
||||||
done
|
done
|
||||||
|
|
||||||
echo "Pytorch benchmark on mps device completed"
|
echo "Pytorch benchmark on mps device completed"
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue
Block a user