mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Changelog:
- Don't count running PYTORCH_TEST_WITH_DYNAMO=1 on dynamo/ tests in the pass rate. This was a bug (we were counting all of these as failing, but in reality most of them pass). The net effect is that the pass rate is (artificially) 6% higher.
- Have the histogram script filter out skips based on the pass rate metric.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/118752
Approved by: https://github.com/jamesjwu
88 lines
2.6 KiB
Python
import argparse
import re

from common import get_testcases, key, open_test_results, skipped_test
from passrate import compute_pass_rate

"""
|
|
python failures_histogram.py eager_logs_for_py311/ dynamo_logs_for_py311/
|
|
|
|
Analyzes skip reasons for Dynamo tests and prints a histogram with repro
|
|
commands. You'll need to download the test reports for the Dynamo shards
|
|
and put them under the specified directory; ditto for the eager shards.
|
|
"""
def skip_reason(testcase):
    for child in testcase.iter():
        if child.tag != "skipped":
            continue
        return child.attrib["message"]
    raise AssertionError("no message?")
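# skip_reason_normalized collapses run-specific details in the skip message
# (hex addresses, MagicMock ids, GitHub issue numbers) so that otherwise
# identical skip reasons bucket together in the histogram.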
def skip_reason_normalized(testcase):
    for child in testcase.iter():
        if child.tag != "skipped":
            continue
        result = child.attrib["message"].split("\n")[0]
        result = result.split(">")[0]
        result = re.sub(r"0x\w+", "0xDEADBEEF", result)
        result = re.sub(r"MagicMock id='\d+'", "MagicMock id='0000000000'", result)
        result = re.sub(r"issues/\d+", "issues/XXX", result)
        return result
    raise AssertionError("no message?")
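# Groups skipped testcases by normalized skip reason and returns a list of
# (count, reason, testcases) tuples, sorted by count (largest first).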
def get_failures(testcases):
    skipped = [t for t in testcases if skipped_test(t)]
    skipped_dict = {}
    for s in skipped:
        reason = skip_reason_normalized(s)
        if reason not in skipped_dict:
            skipped_dict[reason] = []
        skipped_dict[reason].append(s)
    result = []
    for s, v in skipped_dict.items():
        result.append((len(v), s, v))
    result.sort(reverse=True)
    return result
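# Builds a command line that reruns a single test under Dynamo.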
def repro(testcase):
    return f"PYTORCH_TEST_WITH_DYNAMO=1 pytest {testcase.attrib['file']} -v -k {testcase.attrib['name']}"
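# Cross-references the Dynamo test reports against the eager ones via
# compute_pass_rate, then prints a histogram of skip reasons for the tests
# that compute_pass_rate flags as failing, with one sample repro command each.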
# e.g. "17c5f69852/eager", "17c5f69852/dynamo"
def failures_histogram(eager_dir, dynamo_dir):
    fail_keys = compute_pass_rate(eager_dir, dynamo_dir)
    xmls = open_test_results(dynamo_dir)

    testcases = get_testcases(xmls)
    testcases = [t for t in testcases if key(t) in fail_keys]
    dct = get_failures(testcases)

    a = [(x, y, repro(z[0])) for x, y, z in dct]

    counts, _, _ = zip(*a)

    print("(num_failed_tests, error_msg, sample_test)")
    for row in a:
        print(row)
    print("[counts]", sum(counts))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog="failures_histogram",
        description="See statistics about skipped Dynamo tests",
    )
    # linux-focal-py3.11-clang10 (default) Test Reports (xml) directory
    parser.add_argument("eager_dir")
    # linux-focal-py3.11-clang10 (dynamo) Test Reports (xml) directory
    parser.add_argument("dynamo_dir")
    args = parser.parse_args()
    failures_histogram(args.eager_dir, args.dynamo_dir)