Enable automigration for windows jobs (#129977)

Enable Windows jobs to automatically use Linux Foundation (LF) runners when the author has opted in.
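
For context, the "${{ needs.get-label-type.outputs.label-type }}" prefix used throughout the diffs below is produced by a "get-label-type" job in each calling workflow. A minimal sketch of what such a job looks like, assuming it delegates to a runner-determinator reusable workflow (the path, ref, and input names here are assumptions, not part of this commit):

  get-label-type:
    name: get-label-type
    # Assumed location and inputs of the runner determinator; illustrative only.
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
    # outputs.label-type resolves to "lf." when the author has opted in to the
    # Linux Foundation fleet, and to "" otherwise.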

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129977
Approved by: https://github.com/clee2000
Zain Rizvi authored on 2024-07-03 22:02:56 +00:00; committed by PyTorch MergeBot
parent a79bb8db91
commit b0d0114f5b
4 changed files with 27 additions and 15 deletions

--- a/.github/workflows/_linux-build.yml
+++ b/.github/workflows/_linux-build.yml

@@ -39,7 +39,7 @@ on:
         type: string
         default: "linux.2xlarge"
         description: |
-          List of CUDA architectures CI build should target.
+          Label of the runner this job should run on.
     test-matrix:
       required: false
       type: string

--- a/.github/workflows/_win-build.yml
+++ b/.github/workflows/_win-build.yml

@@ -30,6 +30,12 @@ on:
           An option JSON description of what test configs to run later on. This
           is moved here from the Linux test workflow so that we can apply filter
           logic using test-config labels earlier and skip unnecessary builds
+      runner:
+        required: false
+        type: string
+        default: "windows.4xlarge.nonephemeral"
+        description: |
+          Label of the runner this job should run on.
 
     outputs:
       test-matrix:
@@ -43,7 +49,7 @@ jobs:
   build:
     # Don't run on forked repos.
     if: github.repository_owner == 'pytorch'
-    runs-on: [self-hosted, windows.4xlarge.nonephemeral]
+    runs-on: ${{ inputs.runner }}
     timeout-minutes: 240
     outputs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
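
Because the new runner input defaults to "windows.4xlarge.nonephemeral", callers that omit it keep exactly the runner this job used before, while migrated callers prepend the label-type prefix. A sketch of how the three cases resolve (values illustrative):

  # Caller passes:
  #   runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
  #
  # Author opted in:  label-type == "lf."  ->  runs-on: lf.windows.4xlarge.nonephemeral
  # Not opted in:     label-type == ""     ->  runs-on: windows.4xlarge.nonephemeral
  # Input omitted:    default applies      ->  runs-on: windows.4xlarge.nonephemeral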

--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml

@@ -383,15 +383,17 @@ jobs:
     if: github.event_name == 'pull_request'
     name: win-vs2019-cpu-py3
     uses: ./.github/workflows/_win-build.yml
+    needs: get-label-type
     with:
       build-environment: win-vs2019-cpu-py3
       cuda-version: cpu
       sync-tag: win-cpu-build
+      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 2, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
         ]}
 
   linux-focal-cpu-py3_10-gcc9-bazel-test:

--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml

@@ -179,15 +179,17 @@ jobs:
   win-vs2019-cpu-py3-build:
     name: win-vs2019-cpu-py3
     uses: ./.github/workflows/_win-build.yml
+    needs: get-label-type
     with:
       build-environment: win-vs2019-cpu-py3
       cuda-version: cpu
       sync-tag: win-cpu-build
+      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 2, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
         ]}
 
   win-vs2019-cpu-py3-test:
@@ -204,19 +206,21 @@ jobs:
   win-vs2019-cuda11_8-py3-build:
     name: win-vs2019-cuda11.8-py3
     uses: ./.github/workflows/_win-build.yml
+    needs: get-label-type
     with:
       build-environment: win-vs2019-cuda11.8-py3
       cuda-version: "11.8"
       sync-tag: win-cuda-build
+      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 2, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 3, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 4, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 5, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 6, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
-          { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 1, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}windows.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 2, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}windows.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 3, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}windows.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 4, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}windows.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 5, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}windows.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 6, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}windows.g5.4xlarge.nvidia.gpu" },
+          { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
         ]}
 
   linux-focal-rocm6_1-py3_8-build: