First step to rearrange files in tools folder (#60473)

Summary:
Changes include:
- introduced `linter/`, `testing/`, `stats/` folders in `tools/`
- move appropriate scripts into these folders
- change grepped references in the pytorch/pytorch repo

Next step
- introduce `build/` folder for build scripts

Pull Request resolved: https://github.com/pytorch/pytorch/pull/60473

Test Plan:
- CI (this is important because pytorch/test-infra also relies on some script references)
- tools/tests/

Reviewed By: albanD

Differential Revision: D29352716

Pulled By: walterddr

fbshipit-source-id: bad40b5ce130b35dfd9e59b8af34f9025f3285fd
This commit is contained in:
Rong Rong (AI Infra) 2021-06-24 10:12:37 -07:00 committed by Facebook GitHub Bot
parent 40d2fe1053
commit 7e619b9588
51 changed files with 203 additions and 185 deletions

View File

@ -686,7 +686,7 @@ jobs:
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID" export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
cd workspace cd workspace
export PYTHONPATH="\${PWD}" export PYTHONPATH="\${PWD}"
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
EOL EOL
echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh
unbuffer bash command.sh | ts unbuffer bash command.sh | ts
@ -840,7 +840,7 @@ jobs:
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
export PYTHONPATH="$PWD" export PYTHONPATH="$PWD"
pip install typing_extensions boto3 pip install typing_extensions boto3
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
when: always when: always
- store_test_results: - store_test_results:
path: test/test-reports path: test/test-reports
@ -1455,7 +1455,7 @@ jobs:
# Using the same IAM user to write stats to our OSS bucket # Using the same IAM user to write stats to our OSS bucket
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4} export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
when: always when: always
- store_test_results: - store_test_results:
path: test/test-reports path: test/test-reports

View File

@ -213,7 +213,7 @@
# Using the same IAM user to write stats to our OSS bucket # Using the same IAM user to write stats to our OSS bucket
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4} export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
when: always when: always
- store_test_results: - store_test_results:
path: test/test-reports path: test/test-reports

View File

@ -224,7 +224,7 @@ jobs:
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID" export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
cd workspace cd workspace
export PYTHONPATH="\${PWD}" export PYTHONPATH="\${PWD}"
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
EOL EOL
echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh
unbuffer bash command.sh | ts unbuffer bash command.sh | ts
@ -378,7 +378,7 @@ jobs:
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
export PYTHONPATH="$PWD" export PYTHONPATH="$PWD"
pip install typing_extensions boto3 pip install typing_extensions boto3
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
when: always when: always
- store_test_results: - store_test_results:
path: test/test-reports path: test/test-reports

View File

@ -145,7 +145,7 @@ jobs:
sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh' sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
@ -337,7 +337,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -360,7 +360,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -373,7 +373,7 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
{%- if enable_doc_jobs %} {%- if enable_doc_jobs %}
pytorch_python_doc_build: pytorch_python_doc_build:

View File

@ -203,7 +203,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -226,7 +226,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -239,4 +239,4 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test

View File

@ -93,7 +93,7 @@ jobs:
path: /remote/**/*.bz2 path: /remote/**/*.bz2
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}

View File

@ -92,7 +92,7 @@ jobs:
path: /remote/**/*.zip path: /remote/**/*.zip
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}

View File

@ -91,7 +91,7 @@ jobs:
path: /remote/**/*.whl path: /remote/**/*.whl
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}

View File

@ -29,12 +29,12 @@ jobs:
# only run clang-format on allowlisted files # only run clang-format on allowlisted files
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
echo "| clang-format failures found! Run: " echo "| clang-format failures found! Run: "
echo "| tools/clang_format_ci.sh ${BASE_SHA} " echo "| tools/linter/clang_format_ci.sh ${BASE_SHA} "
echo "| to fix this error. " echo "| to fix this error. "
echo "| For more info, see: https://github.com/pytorch/pytorch/wiki/clang-format " echo "| For more info, see: https://github.com/pytorch/pytorch/wiki/clang-format "
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
tools/clang_format_ci.sh "${BASE_SHA}" tools/linter/clang_format_ci.sh "${BASE_SHA}"
GIT_DIFF=$(git diff) GIT_DIFF=$(git diff)
if [[ -z $GIT_DIFF ]]; then if [[ -z $GIT_DIFF ]]; then

View File

@ -31,7 +31,7 @@ jobs:
- name: Ensure correct trailing newlines - name: Ensure correct trailing newlines
if: always() && steps.requirements.outcome == 'success' if: always() && steps.requirements.outcome == 'success'
run: | run: |
(! git --no-pager grep -Il '' -- . ':(exclude)**/contrib/**' ':(exclude)third_party' ':(exclude)**.expect' ':(exclude)tools/clang_format_hash' | tools/trailing_newlines.py || (echo "The above files do not have correct trailing newlines; please normalize them"; false)) (! git --no-pager grep -Il '' -- . ':(exclude)**/contrib/**' ':(exclude)third_party' ':(exclude)**.expect' ':(exclude)tools/clang_format_hash' | tools/linter/trailing_newlines.py || (echo "The above files do not have correct trailing newlines; please normalize them"; false))
- name: Ensure no trailing spaces - name: Ensure no trailing spaces
if: always() if: always()
run: | run: |
@ -160,7 +160,7 @@ jobs:
- name: Run ShellCheck - name: Run ShellCheck
if: always() && steps.install_shellcheck.outcome == 'success' if: always() && steps.install_shellcheck.outcome == 'success'
run: | run: |
if ! tools/run_shellcheck.sh .extracted_scripts .jenkins/pytorch; then if ! tools/linter/run_shellcheck.sh .extracted_scripts .jenkins/pytorch; then
echo echo
echo 'ShellCheck gave a nonzero exit code. Please fix the warnings' echo 'ShellCheck gave a nonzero exit code. Please fix the warnings'
echo 'listed above. Note that if a path in one of the above warning' echo 'listed above. Note that if a path in one of the above warning'
@ -233,7 +233,7 @@ jobs:
- name: Install dependencies - name: Install dependencies
run: | run: |
set -eux set -eux
pip install typing-extensions # for tools/translate_annotations.py pip install typing-extensions # for tools/linter/translate_annotations.py
pip install -r requirements-flake8.txt pip install -r requirements-flake8.txt
flake8 --version flake8 --version
- name: Run flake8 - name: Run flake8
@ -245,7 +245,7 @@ jobs:
env: env:
HEAD_SHA: ${{ github.event.pull_request.head.sha }} HEAD_SHA: ${{ github.event.pull_request.head.sha }}
run: | run: |
tools/translate_annotations.py \ tools/linter/translate_annotations.py \
--file="${GITHUB_WORKSPACE}"/flake8-output.txt \ --file="${GITHUB_WORKSPACE}"/flake8-output.txt \
--regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)' \ --regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)' \
--commit="$HEAD_SHA" \ --commit="$HEAD_SHA" \
@ -275,7 +275,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
fetch-depth: 0 # to allow tools/clang_tidy.py to do its thing fetch-depth: 0 # to allow tools/linter/clang_tidy.py to do its thing
- name: Prepare output dir with HEAD commit SHA - name: Prepare output dir with HEAD commit SHA
env: env:
HEAD_SHA: ${{ github.event.pull_request.head.sha }} HEAD_SHA: ${{ github.event.pull_request.head.sha }}
@ -328,7 +328,7 @@ jobs:
# /torch/csrc/generic/*.cpp is excluded because those files aren't actually built. # /torch/csrc/generic/*.cpp is excluded because those files aren't actually built.
# deploy/interpreter files are excluded due to using macros and other techniquies # deploy/interpreter files are excluded due to using macros and other techniquies
# that are not easily converted to accepted c++ # that are not easily converted to accepted c++
python3 tools/clang_tidy.py \ python3 tools/linter/clang_tidy.py \
--verbose \ --verbose \
--paths torch/csrc/ \ --paths torch/csrc/ \
--diff-file pr.diff \ --diff-file pr.diff \
@ -353,7 +353,7 @@ jobs:
cat "${GITHUB_WORKSPACE}"/clang-tidy-output.txt cat "${GITHUB_WORKSPACE}"/clang-tidy-output.txt
tools/translate_annotations.py \ tools/linter/translate_annotations.py \
--file=clang-tidy-output.txt \ --file=clang-tidy-output.txt \
--regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]' \ --regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]' \
--commit="$HEAD_SHA" \ --commit="$HEAD_SHA" \

View File

@ -143,7 +143,7 @@ jobs:
sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh' sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
@ -335,7 +335,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -358,7 +358,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -371,4 +371,4 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test

View File

@ -143,7 +143,7 @@ jobs:
sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh' sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
@ -335,7 +335,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -358,7 +358,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -371,4 +371,4 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test

View File

@ -144,7 +144,7 @@ jobs:
sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh' sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
- name: Display and upload binary build size statistics (Click Me) - name: Display and upload binary build size statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }} CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
@ -336,7 +336,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -359,7 +359,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -372,7 +372,7 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test
pytorch_python_doc_build: pytorch_python_doc_build:
runs-on: linux.2xlarge runs-on: linux.2xlarge

View File

@ -167,7 +167,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -190,7 +190,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -203,4 +203,4 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test

View File

@ -185,7 +185,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -208,7 +208,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -221,4 +221,4 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test

View File

@ -184,7 +184,7 @@ jobs:
- name: Checkout PyTorch - name: Checkout PyTorch
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
# deep clone, to allow tools/print_test_stats.py to use Git commands # deep clone, to allow tools/stats/print_test_stats.py to use Git commands
fetch-depth: 0 fetch-depth: 0
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2
name: Download PyTorch Test Reports name: Download PyTorch Test Reports
@ -207,7 +207,7 @@ jobs:
run: .github/scripts/parse_ref.py run: .github/scripts/parse_ref.py
- name: Display and upload test statistics (Click Me) - name: Display and upload test statistics (Click Me)
# temporary hack: set CIRCLE_* vars, until we update # temporary hack: set CIRCLE_* vars, until we update
# tools/print_test_stats.py to natively support GitHub Actions # tools/stats/print_test_stats.py to natively support GitHub Actions
env: env:
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
@ -220,4 +220,4 @@ jobs:
CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}' CIRCLE_WORKFLOW_ID: '${{ github.run_id }}_${{ github.run_number }}'
run: | run: |
export PYTHONPATH=$PWD export PYTHONPATH=$PWD
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test python tools/stats/print_test_stats.py --upload-to-s3 --compare-with-s3 test

View File

@ -439,12 +439,12 @@ is part of the test suite `ContainerAliasingTest` in the file
### Run Specific CI Jobs ### Run Specific CI Jobs
You can generate a commit that limits the CI to only run a specific job by using You can generate a commit that limits the CI to only run a specific job by using
`tools/explicit_ci_jobs.py` like so: `tools/testing/explicit_ci_jobs.py` like so:
```bash ```bash
# --job: specify one or more times to filter to a specific job + its dependencies # --job: specify one or more times to filter to a specific job + its dependencies
# --make-commit: commit CI changes to git with a message explaining the change # --make-commit: commit CI changes to git with a message explaining the change
python tools/explicit_ci_jobs.py --job binary_linux_manywheel_3_6m_cpu_devtoolset7_nightly_test --make-commit python tools/testing/explicit_ci_jobs.py --job binary_linux_manywheel_3_6m_cpu_devtoolset7_nightly_test --make-commit
# Make your changes # Make your changes
@ -1128,7 +1128,7 @@ have more checks than older versions. In our CI, we run clang-tidy-6.0.
uncommitted changes). Changes are picked up based on a `git diff` with the uncommitted changes). Changes are picked up based on a `git diff` with the
given revision: given revision:
```bash ```bash
python tools/clang_tidy.py -d build -p torch/csrc --diff 'HEAD~1' python tools/linter/clang_tidy.py -d build -p torch/csrc --diff 'HEAD~1'
``` ```
Above, it is assumed you are in the PyTorch root folder. `path/to/build` should Above, it is assumed you are in the PyTorch root folder. `path/to/build` should

View File

@ -26,7 +26,7 @@ SHELLCHECK_GHA_GENERATED_FOLDER=.shellcheck_generated_gha
shellcheck-gha: shellcheck-gha:
@$(RM) -r $(SHELLCHECK_GHA_GENERATED_FOLDER) @$(RM) -r $(SHELLCHECK_GHA_GENERATED_FOLDER)
tools/extract_scripts.py --out=$(SHELLCHECK_GHA_GENERATED_FOLDER) tools/extract_scripts.py --out=$(SHELLCHECK_GHA_GENERATED_FOLDER)
tools/run_shellcheck.sh $(SHELLCHECK_GHA_GENERATED_FOLDER) tools/linter/run_shellcheck.sh $(SHELLCHECK_GHA_GENERATED_FOLDER)
generate-gha-workflows: generate-gha-workflows:
.github/scripts/generate_ci_workflows.py .github/scripts/generate_ci_workflows.py

View File

@ -40,7 +40,6 @@ files =
.github, .github,
benchmarks/instruction_counts, benchmarks/instruction_counts,
tools, tools,
torch/testing/_internal/framework_utils.py,
torch/utils/_pytree.py, torch/utils/_pytree.py,
torch/utils/benchmark/utils/common.py, torch/utils/benchmark/utils/common.py,
torch/utils/benchmark/utils/timer.py, torch/utils/benchmark/utils/timer.py,

View File

@ -38,11 +38,11 @@ files =
test/test_type_hints.py, test/test_type_hints.py,
test/test_type_info.py, test/test_type_info.py,
test/test_utils.py, test/test_utils.py,
tools/clang_format_utils.py, tools/linter/clang_format_utils.py,
tools/clang_tidy.py, tools/linter/clang_tidy.py,
tools/generate_torch_version.py, tools/generate_torch_version.py,
tools/render_junit.py, tools/render_junit.py,
tools/stats_utils tools/stats
# #
# `exclude` is a regex, not a list of paths like `files` (sigh) # `exclude` is a regex, not a list of paths like `files` (sigh)
@ -109,7 +109,7 @@ warn_unused_ignores = False
[mypy-tools.generate_torch_version] [mypy-tools.generate_torch_version]
warn_unused_ignores = False warn_unused_ignores = False
[mypy-tools.stats_utils.s3_stat_parser] [mypy-tools.stats.s3_stat_parser]
warn_unused_ignores = False warn_unused_ignores = False
# #

View File

@ -16,18 +16,18 @@ import tempfile
import torch import torch
from torch.utils import cpp_extension from torch.utils import cpp_extension
from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict from typing_extensions import TypedDict
try: try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")) sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import ( from tools.stats.s3_stat_parser import (
get_previous_reports_for_branch, get_previous_reports_for_branch,
get_previous_reports_for_pr, get_previous_reports_for_pr,
Report, Report,
HAVE_BOTO3) HAVE_BOTO3)
from tools.testing.test_selections import calculate_shards
except ImportError: except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...") print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False HAVE_BOTO3 = False

View File

@ -4,7 +4,7 @@ from torch.testing._internal.common_utils import TestCase, run_tests
# these tests could eventually be changed to fail if the import/init # these tests could eventually be changed to fail if the import/init
# time is greater than a certain threshold, but for now we just use them # time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch` in our ossci-metrics # as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/print_test_stats.py) # S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase): class TestImportTime(TestCase):
def test_time_import_torch(self): def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr('import torch') TestCase.runWithPytorchAPIUsageStderr('import torch')

View File

@ -3,7 +3,6 @@ import functools
import itertools import itertools
import math import math
import os import os
import random
import re import re
import unittest import unittest
from typing import Any, Callable, Iterator, List, Tuple from typing import Any, Callable, Iterator, List, Tuple
@ -12,7 +11,6 @@ import torch
from torch.testing._internal.common_utils import \ from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS, TestCase, make_tensor, run_tests, skipIfRocm, slowTest) (IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS, TestCase, make_tensor, run_tests, skipIfRocm, slowTest)
from torch.testing._internal.framework_utils import calculate_shards
from torch.testing._internal.common_device_type import \ from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes, (PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyOnCPUAndCUDA, get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyOnCPUAndCUDA,
@ -619,97 +617,6 @@ instantiate_device_type_tests(TestTesting, globals())
class TestFrameworkUtils(TestCase): class TestFrameworkUtils(TestCase):
tests = [
'super_long_test',
'long_test1',
'long_test2',
'normal_test1',
'normal_test2',
'normal_test3',
'short_test1',
'short_test2',
'short_test3',
'short_test4',
'short_test5',
]
test_times = {
'super_long_test': 55,
'long_test1': 22,
'long_test2': 18,
'normal_test1': 9,
'normal_test2': 7,
'normal_test3': 5,
'short_test1': 1,
'short_test2': 0.6,
'short_test3': 0.4,
'short_test4': 0.3,
'short_test5': 0.01,
}
def test_calculate_2_shards_with_complete_test_times(self):
expected_shards = [
(60, ['super_long_test', 'normal_test3']),
(58.31, ['long_test1', 'long_test2', 'normal_test1', 'normal_test2', 'short_test1', 'short_test2',
'short_test3', 'short_test4', 'short_test5'])
]
self.assertEqual(expected_shards, calculate_shards(2, self.tests, self.test_times))
def test_calculate_5_shards_with_complete_test_times(self):
expected_shards = [
(55, ['super_long_test']),
(22, ['long_test1', ]),
(18, ['long_test2', ]),
(11.31, ['normal_test1', 'short_test1', 'short_test2', 'short_test3', 'short_test4', 'short_test5']),
(12, ['normal_test2', 'normal_test3']),
]
self.assertEqual(expected_shards, calculate_shards(5, self.tests, self.test_times))
def test_calculate_2_shards_with_incomplete_test_times(self):
incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
expected_shards = [
(22, ['long_test1', 'long_test2', 'normal_test3', 'short_test3', 'short_test5']),
(10, ['normal_test1', 'short_test1', 'super_long_test', 'normal_test2', 'short_test2', 'short_test4']),
]
self.assertEqual(expected_shards, calculate_shards(2, self.tests, incomplete_test_times))
def test_calculate_5_shards_with_incomplete_test_times(self):
incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
expected_shards = [
(22, ['long_test1', 'normal_test2', 'short_test5']),
(9, ['normal_test1', 'normal_test3']),
(1, ['short_test1', 'short_test2']),
(0, ['super_long_test', 'short_test3']),
(0, ['long_test2', 'short_test4']),
]
self.assertEqual(expected_shards, calculate_shards(5, self.tests, incomplete_test_times))
def test_calculate_2_shards_against_optimal_shards(self):
for _ in range(100):
random.seed(120)
random_times = {k: random.random() * 10 for k in self.tests}
# all test times except first two
rest_of_tests = [i for k, i in random_times.items() if k != 'super_long_test' and k != 'long_test1']
sum_of_rest = sum(rest_of_tests)
random_times['super_long_test'] = max(sum_of_rest / 2, max(rest_of_tests))
random_times['long_test1'] = sum_of_rest - random_times['super_long_test']
# An optimal sharding would look like the below, but we don't need to compute this for the test:
# optimal_shards = [
# (sum_of_rest, ['super_long_test', 'long_test1']),
# (sum_of_rest, [i for i in self.tests if i != 'super_long_test' and i != 'long_test1']),
# ]
calculated_shards = calculate_shards(2, self.tests, random_times)
max_shard_time = max(calculated_shards[0][0], calculated_shards[1][0])
if sum_of_rest != 0:
# The calculated shard should not have a ratio worse than 7/6 for num_shards = 2
self.assertGreaterEqual(7.0 / 6.0, max_shard_time / sum_of_rest)
sorted_tests = sorted(self.tests)
sorted_shard_tests = sorted(calculated_shards[0][1] + calculated_shards[1][1])
# All the tests should be represented by some shard
self.assertEqual(sorted_tests, sorted_shard_tests)
@skipIfRocm @skipIfRocm
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows") @unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")

View File

@ -37,11 +37,11 @@ Build system pieces:
Developer tools which you might find useful: Developer tools which you might find useful:
* [clang_tidy.py](clang_tidy.py) - Script for running clang-tidy * [linter/clang_tidy.py](linter/clang_tidy.py) - Script for running clang-tidy
on lines of your script which you changed. on lines of your script which you changed.
* [extract_scripts.py](extract_scripts.py) - Extract scripts from * [extract_scripts.py](extract_scripts.py) - Extract scripts from
`.github/workflows/*.yml` into a specified dir, on which linters such as `.github/workflows/*.yml` into a specified dir, on which linters such as
[run_shellcheck.sh](run_shellcheck.sh) can be run. Assumes that every `run` [linter/run_shellcheck.sh](linter/run_shellcheck.sh) can be run. Assumes that every `run`
script has `shell: bash` unless a different shell is explicitly listed on that script has `shell: bash` unless a different shell is explicitly listed on that
specific step (so `defaults` doesn't currently work), but also has some rules specific step (so `defaults` doesn't currently work), but also has some rules
for other situations such as [actions/github-script][]. Exits with nonzero for other situations such as [actions/github-script][]. Exits with nonzero
@ -53,17 +53,17 @@ Developer tools which you might find useful:
can conveniently run diffs on them when working on code-generation. can conveniently run diffs on them when working on code-generation.
(See also [generated_dirs.txt](generated_dirs.txt) which (See also [generated_dirs.txt](generated_dirs.txt) which
specifies the list of directories with generated files.) specifies the list of directories with generated files.)
* [mypy_wrapper.py](mypy_wrapper.py) - Run `mypy` on a single file using the * [linter/mypy_wrapper.py](linter/mypy_wrapper.py) - Run `mypy` on a single file using the
appropriate subset of our `mypy*.ini` configs. appropriate subset of our `mypy*.ini` configs.
* [run_shellcheck.sh](run_shellcheck.sh) - Find `*.sh` files (recursively) in * [linter/run_shellcheck.sh](linter/run_shellcheck.sh) - Find `*.sh` files (recursively) in
the directories specified as arguments, and run [ShellCheck][] on all of them. the directories specified as arguments, and run [ShellCheck][] on all of them.
* [test_history.py](test_history.py) - Query S3 to display history of a single * [stats/test_history.py](stats/test_history.py) - Query S3 to display history of a single
test across multiple jobs over time. test across multiple jobs over time.
* [trailing_newlines.py](trailing_newlines.py) - Take names of UTF-8 files from * [linter/trailing_newlines.py](linter/trailing_newlines.py) - Take names of UTF-8 files from
stdin, print names of nonempty files whose contents don't end in exactly one stdin, print names of nonempty files whose contents don't end in exactly one
trailing newline, exit with status 1 if no output printed or 0 if some trailing newline, exit with status 1 if no output printed or 0 if some
filenames were printed. filenames were printed.
* [translate_annotations.py](translate_annotations.py) - Read [Flake8][] or * [linter/translate_annotations.py](linter/translate_annotations.py) - Read [Flake8][] or
[clang-tidy][] warnings (according to a `--regex`) from a `--file`, convert to [clang-tidy][] warnings (according to a `--regex`) from a `--file`, convert to
the JSON format accepted by [pytorch/add-annotations-github-action], and the JSON format accepted by [pytorch/add-annotations-github-action], and
translate line numbers from `HEAD` back in time to the given `--commit` by translate line numbers from `HEAD` back in time to the given `--commit` by

View File

@ -221,7 +221,7 @@ class Mypy(Check):
async def quick(self, files: List[str]) -> CommandResult: async def quick(self, files: List[str]) -> CommandResult:
return await shell_cmd( return await shell_cmd(
[sys.executable, "tools/mypy_wrapper.py"] [sys.executable, "tools/linter/mypy_wrapper.py"]
+ [os.path.join(REPO_ROOT, f) for f in files], + [os.path.join(REPO_ROOT, f) for f in files],
env=self.env(), env=self.env(),
) )
@ -270,7 +270,7 @@ class ShellCheck(Check):
async def quick(self, files: List[str]) -> CommandResult: async def quick(self, files: List[str]) -> CommandResult:
return await shell_cmd( return await shell_cmd(
["tools/run_shellcheck.sh"] + [os.path.join(REPO_ROOT, f) for f in files], ["tools/linter/run_shellcheck.sh"] + [os.path.join(REPO_ROOT, f) for f in files],
) )
async def full(self) -> None: async def full(self) -> None:

View File

@ -2,12 +2,12 @@
set -e set -e
echo "Running pre-commit flake8" echo "Running pre-commit flake8"
python tools/flake8_hook.py python tools/linter/flake8_hook.py
if [ $(which clang-tidy) ] if [ $(which clang-tidy) ]
then then
echo "Running pre-commit clang-tidy" echo "Running pre-commit clang-tidy"
python tools/clang_tidy.py \ python tools/linter/clang_tidy.py \
--paths torch/csrc \ --paths torch/csrc \
--diff HEAD \ --diff HEAD \
-g"-torch/csrc/jit/passes/onnx/helper.cpp" \ -g"-torch/csrc/jit/passes/onnx/helper.cpp" \
@ -22,4 +22,4 @@ else
fi fi
echo "Running pre-commit clang-format" echo "Running pre-commit clang-format"
tools/git-clang-format HEAD~ --force tools/linter/git-clang-format HEAD~ --force

View File

@ -10,4 +10,4 @@ find . -type f \
-path './torch/csrc/jit/*' -or \ -path './torch/csrc/jit/*' -or \
-path './test/cpp/jit/*' -or \ -path './test/cpp/jit/*' -or \
-path './test/cpp/tensorexpr/*' \ -path './test/cpp/tensorexpr/*' \
| xargs tools/git-clang-format --verbose "$1" -- | xargs tools/linter/git-clang-format --verbose "$1" --

View File

@ -196,7 +196,7 @@ def main(args: List[str]) -> None:
"python.linting.mypyPath": "python.linting.mypyPath":
"${env:HOME}/miniconda3/envs/pytorch/bin/python", "${env:HOME}/miniconda3/envs/pytorch/bin/python",
"python.linting.mypyArgs": [ "python.linting.mypyArgs": [
"${workspaceFolder}/tools/mypy_wrapper.py" "${workspaceFolder}/tools/linter/mypy_wrapper.py"
] ]
} }

View File

@ -24,7 +24,7 @@ class Diff(TypedDict):
hunks: List[Hunk] hunks: List[Hunk]
# adapted from the similar regex in tools/clang_tidy.py # adapted from the similar regex in tools/linter/clang_tidy.py
# @@ -start,count +start,count @@ # @@ -start,count +start,count @@
hunk_pattern = r'^@@\s+-(\d+)(?:,(\d+))?\s+\+(\d+)(?:,(\d+))?\s+@@' hunk_pattern = r'^@@\s+-(\d+)(?:,(\d+))?\s+\+(\d+)(?:,(\d+))?\s+@@'

0
tools/stats/__init__.py Normal file
View File

View File

@ -5,7 +5,7 @@ import json
import os import os
import statistics import statistics
from collections import defaultdict from collections import defaultdict
from tools.stats_utils.s3_stat_parser import get_previous_reports_for_branch, Report, Version2Report from tools.stats.s3_stat_parser import get_previous_reports_for_branch, Report, Version2Report
from typing import cast, DefaultDict, Dict, List, Any from typing import cast, DefaultDict, Dict, List, Any
from urllib.request import urlopen from urllib.request import urlopen

View File

@ -18,9 +18,9 @@ from xml.dom import minidom
import requests import requests
from typing_extensions import TypedDict from typing_extensions import TypedDict
from tools.stats_utils.s3_stat_parser import (newify_case, get_S3_object_from_bucket, get_test_stats_summaries_for_job, from tools.stats.s3_stat_parser import (newify_case, get_S3_object_from_bucket, get_test_stats_summaries_for_job,
Report, Status, Commit, HAVE_BOTO3, Version2Case, VersionedReport, Report, Status, Commit, HAVE_BOTO3, Version2Case, VersionedReport,
Version1Report, Version2Report, ReportMetaMeta) Version1Report, Version2Report, ReportMetaMeta)
@ -50,7 +50,7 @@ class SuiteDiff(TypedDict):
# TODO: consolidate this with the get_cases function from # TODO: consolidate this with the get_cases function from
# tools/test_history.py # tools/stats/test_history.py
# Here we translate to a three-layer format (file -> suite -> case) # Here we translate to a three-layer format (file -> suite -> case)
# rather than a two-layer format (suite -> case) because as mentioned in # rather than a two-layer format (suite -> case) because as mentioned in

View File

@ -7,8 +7,8 @@ from datetime import datetime, timezone
from signal import SIG_DFL, SIGPIPE, signal from signal import SIG_DFL, SIGPIPE, signal
from typing import Dict, Iterator, List, Optional, Set, Tuple from typing import Dict, Iterator, List, Optional, Set, Tuple
from tools.stats_utils.s3_stat_parser import (Report, get_cases, from tools.stats.s3_stat_parser import (Report, get_cases,
get_test_stats_summaries) get_test_stats_summaries)
def get_git_commit_history( def get_git_commit_history(
@ -193,7 +193,7 @@ In multiline mode, each line next includes the name of a CircleCI job,
followed by the time of the specified test in that job at that commit. followed by the time of the specified test in that job at that commit.
Example: Example:
$ tools/test_history.py --mode=multiline --ref=594a66 --sha-length=8 --test=test_set_dir \ $ tools/stats/test_history.py --mode=multiline --ref=594a66 --sha-length=8 --test=test_set_dir \
--job pytorch_linux_xenial_py3_6_gcc5_4_test --job pytorch_linux_xenial_py3_6_gcc7_test --job pytorch_linux_xenial_py3_6_gcc5_4_test --job pytorch_linux_xenial_py3_6_gcc7_test
2021-02-10 11:13:34Z 594a66d7 pytorch_linux_xenial_py3_6_gcc5_4_test 0.36s 2021-02-10 11:13:34Z 594a66d7 pytorch_linux_xenial_py3_6_gcc5_4_test 0.36s
2021-02-10 11:13:34Z 594a66d7 pytorch_linux_xenial_py3_6_gcc7_test 0.573s errored 2021-02-10 11:13:34Z 594a66d7 pytorch_linux_xenial_py3_6_gcc7_test 0.573s errored
@ -211,7 +211,7 @@ Example:
Another multiline example, this time with the --all flag: Another multiline example, this time with the --all flag:
$ tools/test_history.py --mode=multiline --all --ref=321b9 --delta=12 --sha-length=8 \ $ tools/stats/test_history.py --mode=multiline --all --ref=321b9 --delta=12 --sha-length=8 \
--test=test_qr_square_many_batched_complex_cuda --test=test_qr_square_many_batched_complex_cuda
2021-01-07 10:04:56Z 321b9883 pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7_test2 424.284s 2021-01-07 10:04:56Z 321b9883 pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7_test2 424.284s
2021-01-07 10:04:56Z 321b9883 pytorch_linux_xenial_cuda10_2_cudnn7_py3_slow_test 0.006s skipped 2021-01-07 10:04:56Z 321b9883 pytorch_linux_xenial_cuda10_2_cudnn7_py3_slow_test 0.006s skipped
@ -226,7 +226,7 @@ In columns mode, the name of the job isn't printed, but the order of the
columns is guaranteed to match the order of the jobs passed on the columns is guaranteed to match the order of the jobs passed on the
command line. Example: command line. Example:
$ tools/test_history.py --mode=columns --ref=3cf783 --sha-length=8 --test=test_set_dir \ $ tools/stats/test_history.py --mode=columns --ref=3cf783 --sha-length=8 --test=test_set_dir \
--job pytorch_linux_xenial_py3_6_gcc5_4_test --job pytorch_linux_xenial_py3_6_gcc7_test --job pytorch_linux_xenial_py3_6_gcc5_4_test --job pytorch_linux_xenial_py3_6_gcc7_test
2021-02-10 12:18:50Z 3cf78395 0.644s 0.312s 2021-02-10 12:18:50Z 3cf78395 0.644s 0.312s
2021-02-10 11:13:34Z 594a66d7 0.360s errored 2021-02-10 11:13:34Z 594a66d7 0.360s errored

View File

@ -1,6 +1,6 @@
import unittest import unittest
from tools import mypy_wrapper from tools.linter import mypy_wrapper
class TestMypyWrapper(unittest.TestCase): class TestMypyWrapper(unittest.TestCase):

View File

@ -2,11 +2,11 @@
import unittest import unittest
from typing import Dict, List from typing import Dict, List
from tools import print_test_stats from tools.stats import print_test_stats
from tools.stats_utils.s3_stat_parser import (Commit, Report, ReportMetaMeta, from tools.stats.s3_stat_parser import (Commit, Report, ReportMetaMeta,
Status, Version1Case, Status, Version1Case,
Version1Report, Version2Case, Version1Report, Version2Case,
Version2Report) Version2Report)
def fakehash(char: str) -> str: def fakehash(char: str) -> str:

View File

@ -4,7 +4,7 @@ import shlex
import unittest import unittest
from typing import List, Optional from typing import List, Optional
from tools import test_history from tools.stats import test_history
from typing_extensions import TypedDict from typing_extensions import TypedDict

View File

@ -0,0 +1,112 @@
import random
import unittest
from tools.testing.test_selections import calculate_shards
from typing import Dict, List, Tuple
class TestCalculateShards(unittest.TestCase):
    """Unit tests for ``tools.testing.test_selections.calculate_shards``.

    ``calculate_shards(num_shards, tests, test_times)`` partitions ``tests``
    into ``num_shards`` buckets, greedily balancing total known runtime; tests
    with unknown times are round-robined across shards after the known ones.
    """

    # Names of the tests to shard, ordered roughly by expected duration.
    tests: List[str] = [
        'super_long_test',
        'long_test1',
        'long_test2',
        'normal_test1',
        'normal_test2',
        'normal_test3',
        'short_test1',
        'short_test2',
        'short_test3',
        'short_test4',
        'short_test5',
    ]
    # Known per-test durations in seconds; keys match ``tests`` above.
    test_times: Dict[str, float] = {
        'super_long_test': 55,
        'long_test1': 22,
        'long_test2': 18,
        'normal_test1': 9,
        'normal_test2': 7,
        'normal_test3': 5,
        'short_test1': 1,
        'short_test2': 0.6,
        'short_test3': 0.4,
        'short_test4': 0.3,
        'short_test5': 0.01,
    }

    def assert_shards_equal(
        self,
        expected_shards: List[Tuple[float, List[str]]],
        actual_shards: List[Tuple[float, List[str]]]
    ) -> None:
        """Compare shard lists pairwise, with float tolerance on the times.

        Uses ``assertAlmostEqual`` for the accumulated shard time (float
        summation order can differ) and exact list equality for the test
        names assigned to each shard.
        """
        for expected, actual in zip(expected_shards, actual_shards):
            self.assertAlmostEqual(expected[0], actual[0])
            self.assertListEqual(expected[1], actual[1])

    def test_calculate_2_shards_with_complete_test_times(self) -> None:
        expected_shards = [
            (60, ['super_long_test', 'normal_test3']),
            (58.31, ['long_test1', 'long_test2', 'normal_test1', 'normal_test2', 'short_test1', 'short_test2',
                     'short_test3', 'short_test4', 'short_test5'])
        ]
        self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, self.test_times))

    def test_calculate_5_shards_with_complete_test_times(self) -> None:
        expected_shards = [
            (55.0, ['super_long_test']),
            (22.0, ['long_test1', ]),
            (18.0, ['long_test2', ]),
            (11.31, ['normal_test1', 'short_test1', 'short_test2', 'short_test3', 'short_test4', 'short_test5']),
            (12.0, ['normal_test2', 'normal_test3']),
        ]
        self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, self.test_times))

    def test_calculate_2_shards_with_incomplete_test_times(self) -> None:
        # Keep timing data only for the '*test1' tests; the rest are unknown
        # and should be distributed round-robin by calculate_shards.
        incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
        expected_shards = [
            (22.0, ['long_test1', 'long_test2', 'normal_test3', 'short_test3', 'short_test5']),
            (10.0, ['normal_test1', 'short_test1', 'super_long_test', 'normal_test2', 'short_test2', 'short_test4']),
        ]
        self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, incomplete_test_times))

    def test_calculate_5_shards_with_incomplete_test_times(self) -> None:
        incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
        expected_shards = [
            (22.0, ['long_test1', 'normal_test2', 'short_test5']),
            (9.0, ['normal_test1', 'normal_test3']),
            (1.0, ['short_test1', 'short_test2']),
            (0.0, ['super_long_test', 'short_test3']),
            (0.0, ['long_test2', 'short_test4']),
        ]
        self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, incomplete_test_times))

    def test_calculate_2_shards_against_optimal_shards(self) -> None:
        # BUG FIX: the seed used to be set *inside* the loop, which made all
        # 100 iterations produce identical random_times and test only a single
        # timing distribution. Seed once here so each trial differs while the
        # test as a whole stays deterministic.
        random.seed(120)
        for _ in range(100):
            random_times = {k: random.random() * 10 for k in self.tests}
            # all test times except first two
            rest_of_tests = [i for k, i in random_times.items() if k != 'super_long_test' and k != 'long_test1']
            sum_of_rest = sum(rest_of_tests)
            # Rig the two big tests so that an optimal 2-way split has both
            # shards summing to exactly sum_of_rest.
            random_times['super_long_test'] = max(sum_of_rest / 2, max(rest_of_tests))
            random_times['long_test1'] = sum_of_rest - random_times['super_long_test']
            # An optimal sharding would look like the below, but we don't need to compute this for the test:
            # optimal_shards = [
            #     (sum_of_rest, ['super_long_test', 'long_test1']),
            #     (sum_of_rest, [i for i in self.tests if i != 'super_long_test' and i != 'long_test1']),
            # ]
            calculated_shards = calculate_shards(2, self.tests, random_times)
            max_shard_time = max(calculated_shards[0][0], calculated_shards[1][0])
            if sum_of_rest != 0:
                # The calculated shard should not have a ratio worse than 7/6 for num_shards = 2
                self.assertGreaterEqual(7.0 / 6.0, max_shard_time / sum_of_rest)
                sorted_tests = sorted(self.tests)
                sorted_shard_tests = sorted(calculated_shards[0][1] + calculated_shards[1][1])
                # All the tests should be represented by some shard
                self.assertEqual(sorted_tests, sorted_shard_tests)
# Allow running this test file directly (e.g. `python tools/test/test_test_selections.py`)
# in addition to discovery via pytest/unittest.
if __name__ == '__main__':
    unittest.main()

View File

@ -1,4 +1,4 @@
from tools import trailing_newlines from tools.linter import trailing_newlines
import unittest import unittest
import tempfile import tempfile

View File

@ -1,7 +1,7 @@
import re import re
import unittest import unittest
from tools.translate_annotations import parse_annotation, parse_diff, translate from tools.linter.translate_annotations import parse_annotation, parse_diff, translate
flake8_regex \ flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)' = r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'

View File