[EZ] Remove remaining amz2023 runner variant references (#136540)

Validated that no jobs use the amz2023 runner variant anymore ([proof](https://github.com/search?type=code&q=org%3Apytorch+%2F%5Cbamz2023%5Cb%2F+&p=1)), so this removes all remaining references to it.
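For context, "using the variant" means a workflow job requesting an amz2023-prefixed label via `runs-on`. The snippet below is a hypothetical sketch of the pattern the search above checks for; the workflow and job names are made up, and the label is one of those deleted in this commit.

```yaml
# Hypothetical workflow sketch of the pattern the code search looks for.
# The workflow/job names are illustrative; the label is one removed below.
name: example
on: push
jobs:
  build:
    runs-on: amz2023.linux.2xlarge  # amz2023 runner variant label, no longer referenced anywhere
    steps:
      - run: echo "would run on the Amazon Linux 2023 variant runner"
```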

Explicit references to the amz2023 runner type variants were removed in the following PRs:
- https://github.com/pytorch/ignite/pull/3285
- https://github.com/pytorch/ao/pull/887
- https://github.com/pytorch/fbscribelogger/pull/1
- https://github.com/pytorch/pytorch/pull/134355
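In the scale-config files touched below, each affected runner entry carried an `amz2023` variant stanza. Roughly, an entry looked like the sketch that follows before this change (the runner name, `disk_size`, and `instance_type` are illustrative assumptions; the other fields mirror the diff), and the whole `variants:` block is what gets dropped. Since the base `ami` is already the Amazon Linux 2023 image, the variant carried no extra information.

```yaml
# Illustrative scale-config entry before this change. The runner name,
# disk_size, and instance_type are assumptions; other fields mirror the diff.
runner_types:
  lf.linux.12xlarge:            # hypothetical runner name
    disk_size: 200              # assumed value
    instance_type: c5.12xlarge  # assumed value
    max_available: 1000
    os: linux
    ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
    variants:
      amz2023:                  # variant removed by this commit; base ami is already AL2023
        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
```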

Pull Request resolved: https://github.com/pytorch/pytorch/pull/136540
Approved by: https://github.com/huydhn, https://github.com/malfet
commit 37f340c1e5 (parent 9c2c61d2dd)
Author:    Zain Rizvi
Date:      2024-09-25 19:00:58 +00:00
Committer: PyTorch MergeBot
5 changed files with 2 additions and 50 deletions

View File

@@ -32,30 +32,6 @@ self-hosted-runner:
     - lf.linux.8xlarge.nvidia.gpu
     - lf.linux.16xlarge.nvidia.gpu
     - lf.linux.g5.4xlarge.nvidia.gpu
-    # Organization-wide AWS Linux Runners with new Amazon 2023 AMI
-    - amz2023.linux.large
-    - amz2023.linux.2xlarge
-    - amz2023.linux.4xlarge
-    - amz2023.linux.12xlarge
-    - amz2023.linux.24xlarge
-    - amz2023.linux.arm64.2xlarge
-    - amz2023.linux.arm64.m7g.4xlarge
-    - amz2023.linux.arm64.m7g.4xlarge.ephemeral
-    - amz2023.linux.4xlarge.nvidia.gpu
-    - amz2023.linux.8xlarge.nvidia.gpu
-    - amz2023.linux.16xlarge.nvidia.gpu
-    - amz2023.linux.g5.4xlarge.nvidia.gpu
-    # Pytorch/pytorch AWS Linux Runners with the new Amazon 2023 AMI on Linux Foundation account
-    - amz2023.lf.linux.large
-    - amz2023.lf.linux.2xlarge
-    - amz2023.lf.linux.4xlarge
-    - amz2023.lf.linux.12xlarge
-    - amz2023.lf.linux.24xlarge
-    - amz2023.lf.linux.arm64.2xlarge
-    - amz2023.lf.linux.4xlarge.nvidia.gpu
-    - amz2023.lf.linux.8xlarge.nvidia.gpu
-    - amz2023.lf.linux.16xlarge.nvidia.gpu
-    - amz2023.lf.linux.g5.4xlarge.nvidia.gpu
     # Repo-specific IBM hosted S390x runner
     - linux.s390x
     # Organization wide AWS Windows runners

View File

@@ -36,9 +36,6 @@ runner_types:
     max_available: 1000
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.c.linux.10xlarge.avx2:
     disk_size: 200
     instance_type: m4.10xlarge
@@ -112,9 +109,6 @@ runner_types:
     max_available: 1000
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.c.linux.4xlarge.nvidia.gpu:
     disk_size: 150
     instance_type: g3.4xlarge
@@ -129,9 +123,6 @@ runner_types:
     max_available: 400
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.c.linux.g4dn.12xlarge.nvidia.gpu:
     disk_size: 150
     instance_type: g4dn.12xlarge
@@ -174,9 +165,6 @@ runner_types:
     max_available: 50
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.c.linux.large:
     max_available: 1200
     disk_size: 15

View File

@@ -36,9 +36,6 @@ runner_types:
     max_available: 1000
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.linux.10xlarge.avx2:
     disk_size: 200
     instance_type: m4.10xlarge
@@ -112,9 +109,6 @@ runner_types:
     max_available: 1000
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.linux.4xlarge.nvidia.gpu:
     disk_size: 150
     instance_type: g3.4xlarge
@@ -129,9 +123,6 @@ runner_types:
     max_available: 400
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.linux.g4dn.12xlarge.nvidia.gpu:
     disk_size: 150
     instance_type: g4dn.12xlarge
@@ -174,9 +165,6 @@ runner_types:
     max_available: 50
     os: linux
     ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
-    variants:
-      amz2023:
-        ami: al2023-ami-2023.5.20240701.0-kernel-6.1-x86_64
   lf.linux.large:
     max_available: 1200
     disk_size: 15

View File

@@ -40,7 +40,7 @@ struct TorchBenchmarkCompileTimeLogEntry {
   # The weight of the record according to current sampling rate
   25: optional i64 weight;
-  # The name of the current job. Derived from JOB_NAME, e.g., linux-jammy-py3.8-gcc11 / test (default, 3, 4, amz2023.linux.2xlarge).
+  # The name of the current job. Derived from JOB_NAME, e.g., linux-jammy-py3.8-gcc11 / test (default, 3, 4, linux.2xlarge).
   26: optional string github_job;
   # The GitHub user who triggered the job. Derived from GITHUB_TRIGGERING_ACTOR.

View File

@@ -44,7 +44,7 @@ struct TorchOpenSourceSignpostLogEntry {
   # A unique number for each run of a particular workflow in a repository, e.g., 238742. Derived from GITHUB_RUN_NUMBER.
   10: optional string github_run_number_str;
-  # The name of the current job. Derived from JOB_NAME, e.g., linux-jammy-py3.8-gcc11 / test (default, 3, 4, amz2023.linux.2xlarge).
+  # The name of the current job. Derived from JOB_NAME, e.g., linux-jammy-py3.8-gcc11 / test (default, 3, 4, linux.2xlarge).
   11: optional string job_name;
   # The GitHub user who triggered the job. Derived from GITHUB_TRIGGERING_ACTOR.