Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Summary: Followup to https://github.com/pytorch/pytorch/issues/62288. Front-loads the logic and also forces smoke tests to run on only one shard.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/62344

Test Plan: Note that for the Windows CUDA 10 run on the PR, we get only one shard with the smoke tests running: https://github.com/pytorch/pytorch/pull/62344/checks?check_run_id=3194294041

Reviewed By: seemethere, heitorschueroff

Differential Revision: D29991573

Pulled By: janeyx99

fbshipit-source-id: 263d7de72c7a82a7205932914c32d39892294cad
71 lines · 2.5 KiB · Python · Executable File
#!/usr/bin/env python3

"""Generates a matrix to be utilized through github actions

Will output a matrix to represent our testing configurations, which is currently
dictated by just sharding.

"""

import json
import os
from typing import Dict

from typing_extensions import TypedDict


class Config(TypedDict):
    num_shards: int
    runner: str


def main() -> None:
    TEST_RUNNER_TYPE = os.getenv('TEST_RUNNER_TYPE')
    assert TEST_RUNNER_TYPE is not None
    ON_PULL_REQUEST = os.getenv('GITHUB_HEAD_REF')
    NUM_TEST_SHARDS_ON_PULL_REQUEST = os.getenv('NUM_TEST_SHARDS_ON_PULL_REQUEST')
    NUM_TEST_SHARDS = int(os.getenv('NUM_TEST_SHARDS', '1'))
    if ON_PULL_REQUEST and NUM_TEST_SHARDS_ON_PULL_REQUEST:
        NUM_TEST_SHARDS = int(NUM_TEST_SHARDS_ON_PULL_REQUEST)
    MULTIGPU_RUNNER_TYPE = os.getenv('MULTIGPU_RUNNER_TYPE')
    NOGPU_RUNNER_TYPE = os.getenv('NOGPU_RUNNER_TYPE')
    configs: Dict[str, Config] = {}
    if os.getenv('ENABLE_JIT_LEGACY_TEST'):
        configs['jit_legacy'] = {'num_shards': 1, 'runner': TEST_RUNNER_TYPE}
    if MULTIGPU_RUNNER_TYPE is not None and os.getenv('ENABLE_MULTIGPU_TEST'):
        configs['multigpu'] = {'num_shards': 1, 'runner': MULTIGPU_RUNNER_TYPE}
    if NOGPU_RUNNER_TYPE is not None and os.getenv('ENABLE_NOGPU_NO_AVX_TEST'):
        configs['nogpu_NO_AVX'] = {'num_shards': 1, 'runner': NOGPU_RUNNER_TYPE}
    if NOGPU_RUNNER_TYPE is not None and os.getenv('ENABLE_NOGPU_NO_AVX2_TEST'):
        configs['nogpu_NO_AVX2'] = {'num_shards': 1, 'runner': NOGPU_RUNNER_TYPE}
    if os.getenv('ENABLE_SLOW_TEST'):
        configs['slow'] = {'num_shards': 1, 'runner': TEST_RUNNER_TYPE}
    matrix = {
        'include': [
            {
                'config': 'default',
                'shard': shard,
                'num_shards': NUM_TEST_SHARDS,
                'runner': TEST_RUNNER_TYPE,
            }
            for shard in range(1, NUM_TEST_SHARDS + 1)
        ] + [
            {
                'config': name,
                'shard': shard,
                'num_shards': config['num_shards'],
                'runner': config['runner'],
            }
            for name, config in configs.items()
            for shard in range(1, config['num_shards'] + 1)
        ]
    }
    render_matrix = {'config': list(dict.fromkeys(x['config'] for x in matrix['include']))}
    print(json.dumps({'matrix': matrix, 'render-matrix': render_matrix}, indent=2))
    print(f'::set-output name=matrix::{json.dumps(matrix)}')
    print(f'::set-output name=render-matrix::{json.dumps(render_matrix)}')


if __name__ == "__main__":
    main()
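
For reference, a minimal usage sketch follows. It assumes the script above is saved locally as generate_pytorch_test_matrix.py; that filename, the runner label, and the shard counts are illustrative assumptions, not values taken from this page or from the PyTorch workflows.

# Usage sketch (hypothetical filename and values; in CI the workflow sets
# these variables in its job definition before running the script).
import os
import subprocess

env = dict(
    os.environ,
    TEST_RUNNER_TYPE='linux.2xlarge',   # hypothetical runner label
    NUM_TEST_SHARDS='2',                # split the 'default' config into two shards
    ENABLE_SLOW_TEST='1',               # opt into the extra 'slow' config
)
result = subprocess.run(
    ['python3', 'generate_pytorch_test_matrix.py'],
    env=env,
    capture_output=True,
    text=True,
    check=True,
)
# The script prints the pretty-printed JSON first, followed by the two
# ::set-output lines that the workflow reads back.
print(result.stdout)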
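For that environment, tracing main() by hand suggests output of the following shape; this is a sketch for illustration, not captured output from an actual run.

# Expected shape of the generated matrices for the environment above,
# derived by reading main(); values are illustrative.
expected_matrix = {
    'include': [
        {'config': 'default', 'shard': 1, 'num_shards': 2, 'runner': 'linux.2xlarge'},
        {'config': 'default', 'shard': 2, 'num_shards': 2, 'runner': 'linux.2xlarge'},
        {'config': 'slow', 'shard': 1, 'num_shards': 1, 'runner': 'linux.2xlarge'},
    ]
}
expected_render_matrix = {'config': ['default', 'slow']}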