Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/60167
We were getting errors such as this on Windows in our c10d ProcessGroup test suite:
```
test_send_recv_all_to_all (__main__.ProcessGroupGlooTest) ... Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Jenkins\Miniconda3\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Jenkins\Miniconda3\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\circleci\project\build\win_tmp\build\torch\testing\_internal\common_distributed.py", line 471, in _event_listener
if pipe.poll(None):
File "C:\Jenkins\Miniconda3\lib\multiprocessing\connection.py", line 257, in poll
return self._poll(timeout)
File "C:\Jenkins\Miniconda3\lib\multiprocessing\connection.py", line 330, in _poll
return bool(wait([self], timeout))
File "C:\Jenkins\Miniconda3\lib\multiprocessing\connection.py", line 883, in wait
ov.cancel()
OSError: [WinError 6] The handle is invalid
Fatal Python error: could not acquire lock for <_io.BufferedWriter name='<stderr>'> at interpreter shutdown, possibly due to daemon threads
Python runtime state: finalizing (tstate=000001EFDF228CE0)
Thread 0x00001f68 (most recent call first):
File "C:\Jenkins\Miniconda3\lib\threading.py", line 1202 in invoke_excepthook
File "C:\Jenkins\Miniconda3\lib\threading.py", line 934 in _bootstrap_inner
File "C:\Jenkins\Miniconda3\lib\threading.py", line 890 in _bootstrap
Current thread 0x00000f94 (most recent call first):
<no Python frame>
FAIL (5.009s)
```
And the process would then exit with error code 3221226505 (0xC0000409, i.e. STATUS_STACK_BUFFER_OVERRUN, which Windows also uses for fail-fast aborts).
See: https://app.circleci.com/pipelines/github/pytorch/pytorch/337351/workflows/ad919a3e-fe9a-4566-8ad6-8b0a252f730c/jobs/14170191/steps
By looking at [the code of `_event_listener` in `common_distributed.py`](eb36f67dcc/torch/testing/_internal/common_distributed.py (L467-L489)) I think that the first exception (the one about the handle being invalid) is "expected" as it results from another thread purposely closing the pipe while that thread is polling it.
The relevant part of the problem seems to be the "could not acquire lock" error. I think this stems from the event listener thread being launched as a daemon thread, which means the interpreter will not wait for that thread to complete before shutting down. When the interpreter shuts down it instantly aborts all other threads. If the event listener thread was aborted _while_ it was writing to stderr, then that thread was holding the stream's lock but never got to release it. This is probably what the error is complaining about, and it seems to be intended/expected behavior for CPython: https://bugs.python.org/issue42717.
The solution is thus simple: don't make that thread a daemon thread, and explicitly wait for it to terminate before shutting down.
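In pattern form, the fix looks roughly like this (a minimal sketch using the stdlib `multiprocessing` and `threading` modules; the names are illustrative, not the actual diff):
```python
import multiprocessing.connection
import threading
from multiprocessing import Pipe

def _event_listener(parent_pipe, signal_pipe):
    # Block until either pipe becomes readable; a message on signal_pipe
    # means the test is done and the listener should exit cleanly.
    while True:
        ready = multiprocessing.connection.wait([parent_pipe, signal_pipe])
        if signal_pipe in ready:
            return
        if parent_pipe in ready:
            event = parent_pipe.recv()  # handle the event (handling elided)

parent_pipe, child_pipe = Pipe()
signal_recv, signal_send = Pipe(duplex=False)

# The key change: daemon=False, so the interpreter waits for this thread
# instead of aborting it mid-write at shutdown.
listener = threading.Thread(
    target=_event_listener,
    args=(child_pipe, signal_recv),
    daemon=False,
)
listener.start()

# ... run the test body ...

signal_send.send(None)  # ask the listener to stop ...
listener.join()         # ... and explicitly wait for it before exiting
```
(In the actual `run_test` in the listing below, the send/join happens in a `finally` block so it also runs when the test raises.)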
ghstack-source-id: 132293710
Test Plan: Will see...
Reviewed By: pritamdamania87
Differential Revision: D29193014
fbshipit-source-id: 4aabe1fc74bf9c54ca605e7a702ac99655489780
from contextlib import contextmanager
from datetime import timedelta
from enum import Enum
import faulthandler
import multiprocessing
from multiprocessing import Manager
from io import StringIO
import os
import sys
import tempfile
import threading
import time
import unittest
import logging
import traceback
import types

from typing import NamedTuple, Optional, Union
from functools import wraps

import torch
import torch.distributed as c10d
import torch.cuda.nccl

from functools import partial, reduce
from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, FILE_SCHEMA, find_free_port, retry_on_connect_failures

logger = logging.getLogger(__name__)


class TestSkip(NamedTuple):
    exit_code: int
    message: str


TEST_SKIPS = {
    "backend_unavailable": TestSkip(72, "Skipped because distributed backend is not available."),
    "small_worldsize": TestSkip(73, "Skipped due to small world size."),
    "no_cuda": TestSkip(74, "CUDA is not available."),
    "multi-gpu": TestSkip(75, "Need at least 2 CUDA devices"),
    "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
    "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
    "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
}


def skip_if_no_gpu(func):
    """NCCL multi-GPU tests require at least 2 GPUs. Skip if this is not met."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        if torch.cuda.device_count() < int(os.environ["WORLD_SIZE"]):
            message = "Need at least {} CUDA devices".format(os.environ["WORLD_SIZE"])
            TEST_SKIPS["multi-gpu"] = TestSkip(75, message)
            sys.exit(TEST_SKIPS["multi-gpu"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def skip_if_small_worldsize(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
            sys.exit(TEST_SKIPS["small_worldsize"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def require_n_gpus_for_nccl_backend(n, backend):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if backend == "nccl" and torch.cuda.device_count() < n:
                message = "Need at least {} CUDA devices".format(n)
                TEST_SKIPS["multi-gpu"] = TestSkip(75, message)
                sys.exit(TEST_SKIPS['multi-gpu'].exit_code)
            else:
                return func(*args, **kwargs)
        return wrapper

    return decorator


def skip_if_lt_x_gpu(x):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            message = "Need at least {} CUDA devices".format(x)
            TEST_SKIPS["multi-gpu"] = TestSkip(75, message)
            sys.exit(TEST_SKIPS['multi-gpu'].exit_code)
        return wrapper

    return decorator


# This decorator helps avoid initializing CUDA while testing other backends.
def nccl_skip_if_lt_x_gpu(backend, x):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if backend != "nccl":
                return func(*args, **kwargs)
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            message = "Need at least {} CUDA devices".format(x)
            TEST_SKIPS["multi-gpu"] = TestSkip(75, message)
            sys.exit(TEST_SKIPS['multi-gpu'].exit_code)
        return wrapper

    return decorator


def verify_ddp_error_logged(model_DDP, err_substr):
    # Verify error was logged in ddp_logging_data.
    ddp_logging_data = model_DDP._get_ddp_logging_data()
    assert "has_error" in ddp_logging_data
    assert "error" in ddp_logging_data
    assert err_substr in ddp_logging_data["error"]


def with_nccl_blocking_wait(func):
    """
    Convenience decorator to set/unset NCCL_BLOCKING_WAIT flag. Note that use of
    this decorator will override the setting of NCCL_ASYNC_ERROR_HANDLING for
    the particular test. After the test, both NCCL_BLOCKING_WAIT and
    NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Save and unset NCCL_ASYNC_ERROR_HANDLING
        try:
            cached_nccl_async_error_handling: Union[str, None] = os.environ[
                "NCCL_ASYNC_ERROR_HANDLING"
            ]
            del os.environ["NCCL_ASYNC_ERROR_HANDLING"]
        except KeyError:
            # NCCL_ASYNC_ERROR_HANDLING was unset
            cached_nccl_async_error_handling = None

        # Save val of NCCL_BLOCKING_WAIT and set it.
        try:
            cached_nccl_blocking_wait: Union[str, None] = os.environ[
                "NCCL_BLOCKING_WAIT"
            ]
        except KeyError:
            cached_nccl_blocking_wait = None
        finally:
            os.environ["NCCL_BLOCKING_WAIT"] = "1"

        try:
            ret = func(*args, **kwargs)
            return ret
        finally:
            # restore old values.
            if cached_nccl_async_error_handling is not None:
                os.environ[
                    "NCCL_ASYNC_ERROR_HANDLING"
                ] = cached_nccl_async_error_handling

            if cached_nccl_blocking_wait is not None:
                os.environ["NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait

    return wrapper


def with_dist_debug_levels(levels):
    """
    Runs a test for each distributed debug level specified in levels.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
            for level in levels:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
                ret = func(*args, **kwargs)
            if old_level is not None:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
            # Only returns test return for last test, but since these are
            # unittests the return value is not really used and earlier tests
            # would've raised had they failed.
            return ret

        return wrapper

    return decorator


def requires_gloo():
    return unittest.skipUnless(
        c10d.is_gloo_available(),
        "c10d was not compiled with the Gloo backend",
    )


def requires_nccl_version(version, msg):
    if not c10d.is_nccl_available():
        return unittest.skip(
            "c10d was not compiled with the NCCL backend",
        )
    else:
        return unittest.skipIf(
            torch.cuda.nccl.version() < version,
            "Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format(
                version,
                torch.cuda.nccl.version(), msg),
        )


def requires_nccl():
    return unittest.skipUnless(
        c10d.is_nccl_available(),
        "c10d was not compiled with the NCCL backend",
    )


def requires_mpi():
    return unittest.skipUnless(
        c10d.is_mpi_available(),
        "c10d was not compiled with the MPI backend",
    )


def skip_if_rocm_single_process(func):
    """Skips a test for ROCm in a single process environment"""
    func.skip_if_rocm = True

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_ROCM:
            return func(*args, **kwargs)
        raise unittest.SkipTest("Test skipped for ROCm")

    return wrapper


def skip_if_rocm(func):
    """Skips a test for ROCm"""
    func.skip_if_rocm = True

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_ROCM:
            return func(*args, **kwargs)
        sys.exit(TEST_SKIPS['skipIfRocm'].exit_code)

    return wrapper


def skip_if_win32():
    return unittest.skipIf(
        sys.platform == 'win32',
        "This unit test case is not supported on the Windows platform",
    )


@retry_on_connect_failures
def create_tcp_store(addr="localhost", world_size=1, is_master=True, timeout=timedelta(minutes=5),
                     wait_for_workers=True, jit_class=False):
    """
    Creates a TCP store. Retries if the chosen port is already in use.
    """
    port = find_free_port()
    if jit_class:
        timeout_millisecond = int(timeout / timedelta(milliseconds=1))
        return torch.classes.dist_c10d.TCPStore(addr, port, world_size, is_master, timeout_millisecond)
    else:
        return c10d.TCPStore(addr, port, world_size, is_master, wait_for_workers=wait_for_workers)


TIMEOUT_DEFAULT = 100
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}


def create_device(interface=None):
    if sys.platform == 'win32' or interface is None:
        return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
    else:
        return c10d.ProcessGroupGloo.create_device(interface=interface)


def get_timeout(test_id) -> int:
    return TIMEOUT_OVERRIDE.get(test_id.split('.')[-1], TIMEOUT_DEFAULT)


@contextmanager
def captured_output():
    new_out, new_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = old_out, old_err


def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
    """
    Generate a number of basic test cases for sparse reduction.
    These cover tensors with a varying number of sparse dimensions and a varying
    number of dense dimensions. The only reduction operation we support is sum.
    """
    def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
        # First sparse dimension is [0..rank].
        # Subsequent dimensions are always 0, so we know there is
        # a non-empty intersection between any two sparse tensors.
        indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):
        return reduce(lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)])

    return [
        (
            [
                fn(num_inputs * rank + i, num_inputs * world_size)
                for i in range(num_inputs)
            ],
            [
                compute_sum(fn, num_inputs * world_size)
                for i in range(num_inputs)
            ],
        )
        for fn in [
            partial(generate, sparse_dims=1),
            partial(generate, sparse_dims=2),
            partial(generate, sparse_dims=3),
            partial(generate, dense_dims=1),
            partial(generate, dense_dims=2),
            partial(generate, dense_dims=3),
        ]
    ]


tmp_dir: Optional[tempfile.TemporaryDirectory] = None

def initialize_temp_directories(init_method: Optional[str] = None) -> None:
    global tmp_dir
    tmp_dir = tempfile.TemporaryDirectory()
    os.environ["TEMP_DIR"] = tmp_dir.name
    os.mkdir(os.path.join(tmp_dir.name, "barrier"))
    os.mkdir(os.path.join(tmp_dir.name, "test_dir"))
    init_dir_path = os.path.join(tmp_dir.name, "init_dir")
    os.mkdir(init_dir_path)
    # Set init method if specified.
    if init_method is not None:
        os.environ["INIT_METHOD"] = init_method
    else:
        os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
            init_dir_path, "shared_init_file"
        )

def cleanup_temp_dir() -> None:
    if tmp_dir is not None:
        tmp_dir.cleanup()

# [How does MultiProcessTestCase work?]
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes. By
# default, `world_size()` returns 4. Take `test_rpc_spawn.py`, which inherits
# from this class, as an example: its `setUp()` method calls into
# `MultiProcessTestCase._spawn_processes()`, which spawns `world_size()`
# subprocesses. During the spawn, the main process passes the test name to
# the subprocesses, and the name is acquired from self.id(). The subprocesses
# then use the provided test function name to retrieve the function attribute
# from the test instance and run it. The main process simply waits for all
# subprocesses to join.
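#
# A minimal, hypothetical subclass sketch (illustrative only; `MyDistTest`
# and `test_something` are not part of this file):
#
#     class MyDistTest(MultiProcessTestCase):
#         def setUp(self):
#             super().setUp()
#             self._spawn_processes()
#
#         def test_something(self):
#             # Runs inside each of the `world_size` subprocesses;
#             # `self.rank` and `self.file_name` identify the process and a
#             # shared rendezvous file (e.g. for a c10d FileStore).
#             ...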


class MultiProcessTestCase(TestCase):
    MAIN_PROCESS_RANK = -1
    # This exit code is used to indicate that the test code had an error and
    # exited abnormally. There are certain tests that might use sys.exit() to
    # simulate failures and in those cases, we can't have an exit code of 0,
    # but we still want to ensure we didn't run into any other errors.
    TEST_ERROR_EXIT_CODE = 10

    # do not early terminate for distributed tests.
    def _should_stop_test_suite(self) -> bool:
        return False

    @property
    def world_size(self) -> int:
        return 4

    def join_or_run(self, fn):
        @wraps(fn)
        def wrapper(self):
            if self.rank == self.MAIN_PROCESS_RANK:
                self._join_processes(fn)
            else:
                fn()
        return types.MethodType(wrapper, self)

    # The main process spawns N subprocesses that run the test.
    # Constructor patches current instance test method to
    # assume the role of the main process and join its subprocesses,
    # or run the underlying test function.
    def __init__(self, method_name: str = 'runTest') -> None:
        super().__init__(method_name)
        fn = getattr(self, method_name)
        setattr(self, method_name, self.join_or_run(fn))

    def setUp(self) -> None:
        super().setUp()
        self.skip_return_code_checks = []  # type: ignore[var-annotated]
        self.processes = []  # type: ignore[var-annotated]
        self.rank = self.MAIN_PROCESS_RANK
        self.file_name = tempfile.NamedTemporaryFile(delete=False).name
        global TEST_SKIPS
        self.old_test_skips = TEST_SKIPS.copy()
        # pid to pipe consisting of error message from process.
        self.pid_to_pipe = {}  # type: ignore[var-annotated]

    def tearDown(self) -> None:
        super().tearDown()
        for p in self.processes:
            p.terminate()
        # Each Process instance holds a few open file descriptors. The unittest
        # runner creates a new TestCase instance for each test method and keeps
        # it alive until the end of the entire suite. We must thus reset the
        # processes to prevent an effective file descriptor leak.
        self.processes = []

    def _current_test_name(self) -> str:
        # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
        return self.id().split(".")[-1]

    def _start_processes(self, proc) -> None:
        test_skips_manager = Manager()
        test_skips = test_skips_manager.dict()
        global TEST_SKIPS
        test_skips.update(TEST_SKIPS)
        TEST_SKIPS = test_skips

        self.processes = []
        for rank in range(int(self.world_size)):
            parent_conn, child_conn = torch.multiprocessing.Pipe()
            process = proc(
                target=self.__class__._run,
                name='process ' + str(rank),
                args=(rank, self._current_test_name(), self.file_name, child_conn))
            process.start()
            logger.info(f'Started process {rank} with pid {process.pid}')
            self.pid_to_pipe[process.pid] = parent_conn
            self.processes.append(process)

    def _fork_processes(self) -> None:
        proc = torch.multiprocessing.get_context("fork").Process
        self._start_processes(proc)

    def _spawn_processes(self) -> None:
        proc = torch.multiprocessing.get_context("spawn").Process
        self._start_processes(proc)

    class Event(Enum):
        GET_TRACEBACK = 1

    @staticmethod
    def _event_listener(parent_pipe, signal_pipe, rank: int):
        logger.info(f'Starting event listener thread for {rank}')
        while True:
            ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])

            if parent_pipe in ready_pipes:

                if parent_pipe.closed:
                    logger.info(f'Pipe closed for process {rank}, stopping event listener thread')
                    return

                event = parent_pipe.recv()
                logger.info(f'Received event {event} on process {rank}')

                if event == MultiProcessTestCase.Event.GET_TRACEBACK:
                    # Return traceback to the parent process.
                    with tempfile.NamedTemporaryFile(mode='r+') as tmp_file:
                        faulthandler.dump_traceback(tmp_file)
                        # Flush buffers and seek to read from the beginning
                        tmp_file.flush()
                        tmp_file.seek(0)
                        parent_pipe.send(tmp_file.read())

                        logger.info(f'Process {rank} sent traceback')

            if signal_pipe in ready_pipes:
                return

    @classmethod
    def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
        self = cls(test_name)

        # Start event listener thread.
        signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
        event_listener_thread = threading.Thread(
            target=MultiProcessTestCase._event_listener,
            args=(parent_pipe, signal_recv_pipe, rank),
            daemon=True)
        event_listener_thread.start()

        self.rank = rank
        self.file_name = file_name
        self.run_test(test_name, parent_pipe, signal_send_pipe, event_listener_thread)

        # exit to avoid running teardown() for fork processes
        sys.exit(0)

    def run_test(self, test_name: str, parent_pipe, signal_pipe=None, event_listener_thread=None) -> None:
        if sys.platform != 'win32' and sys.platform != 'darwin':
            # Register signal handler to dump stack traces on FATALs.
            # Windows and MacOS do not support the signal handlers.
            torch._C._set_print_stack_traces_on_fatal_signal(True)

        # self.id() == e.g. '__main__.TestDistributed.test_get_rank'
        # We're retrieving a corresponding test and executing it.
        try:
            getattr(self, test_name)()
        except Exception as e:
            logger.error(
                f'Caught exception: \n{traceback.format_exc()} exiting '
                f'process with exit code: {MultiProcessTestCase.TEST_ERROR_EXIT_CODE}')
            # Send error to parent process.
            parent_pipe.send(traceback.format_exc())
            sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
        finally:
            if signal_pipe is not None:
                signal_pipe.send(None)
            if event_listener_thread is not None:
                event_listener_thread.join()
            # Close pipe after done with test.
            parent_pipe.close()

    def _get_timedout_process_traceback(self) -> None:
        pipes = []
        for i, process in enumerate(self.processes):
            if process.exitcode is None:
                pipe = self.pid_to_pipe[process.pid]
                try:
                    pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
                    pipes.append((i, pipe))
                except ConnectionError as e:
                    logger.error(f'Encountered error while trying to get traceback for process {i}: {e}')

        # Wait for results.
        for rank, pipe in pipes:
            try:
                # Wait for traceback
                if pipe.poll(5):
                    if pipe.closed:
                        logger.info(f'Pipe closed for process {rank}, cannot retrieve traceback')
                        continue

                    traceback = pipe.recv()
                    logger.error(f'Process {rank} timed out with traceback: \n\n{traceback}')
                else:
                    logger.error(f'Could not retrieve traceback for timed out process: {rank}')
            except ConnectionError as e:
                logger.error(f'Encountered error while trying to get traceback for process {rank}: {e}')

    def _join_processes(self, fn) -> None:
        timeout = get_timeout(self.id())
        start_time = time.time()
        subprocess_error = False
        try:
            while True:
                # check to see if any subprocess exited with an error early.
                for (i, p) in enumerate(self.processes):
                    # This is the exit code processes exit with if they
                    # encountered an exception.
                    if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
                        print(f'Process {i} terminated with exit code {p.exitcode}, terminating remaining processes.')
                        active_children = torch.multiprocessing.active_children()
                        for ac in active_children:
                            ac.terminate()
                        subprocess_error = True
                        break
                if subprocess_error:
                    break
                # All processes have joined cleanly if they all have a valid exitcode.
                if all([p.exitcode is not None for p in self.processes]):
                    break
                # Check if we should time out the test. If so, we terminate each process.
                elapsed = time.time() - start_time
                if elapsed > timeout:
                    self._get_timedout_process_traceback()
                    print(f'Timing out after {timeout} seconds and killing subprocesses.')
                    for p in self.processes:
                        p.terminate()
                    break
                # Sleep to avoid excessive busy polling.
                time.sleep(0.1)

            elapsed_time = time.time() - start_time

            if fn in self.skip_return_code_checks:
                self._check_no_test_errors(elapsed_time)
            else:
                self._check_return_codes(elapsed_time)
        finally:
            # Close all pipes
            for pid, pipe in self.pid_to_pipe.items():
                pipe.close()

            global TEST_SKIPS
            TEST_SKIPS = self.old_test_skips

    def _check_no_test_errors(self, elapsed_time) -> None:
        """
        Checks that we didn't have any errors thrown in the child processes.
        """
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError('Process {} timed out after {} seconds'.format(i, elapsed_time))
            self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)

    def _check_return_codes(self, elapsed_time) -> None:
        """
        Checks that the return codes of all spawned processes match, and skips
        tests if they returned a return code indicating a skipping condition.
        """
        first_process = self.processes[0]
        # First, we check if there are errors in the actual processes
        # (via TEST_ERROR_EXIT_CODE), and raise an exception for those.
        # The reason we do this is to attempt to raise a more helpful error
        # message than "Process x terminated/timed out".
        # TODO: we should pipe the exception of the failed subprocess here.
        # Currently, the actual exception is displayed as a logging output.
        errored_processes = [
            (i, p)
            for i, p in enumerate(self.processes)
            if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
        ]
        if errored_processes:
            error = ""
            for i, process in errored_processes:
                # Get error from pipe.
                error_message = self.pid_to_pipe[process.pid].recv()
                error += "Process {} exited with error code {} and exception:\n{}\n".format(
                    i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message)

            raise RuntimeError(error)
        # If no process exited uncleanly, we check for timeouts, and then ensure
        # each process exited cleanly.
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError('Process {} terminated or timed out after {} seconds'.format(i, elapsed_time))
            self.assertEqual(
                p.exitcode,
                first_process.exitcode,
                msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format(
                    i, first_process.exitcode, p.exitcode
                ),
            )
        for skip in TEST_SKIPS.values():
            if first_process.exitcode == skip.exit_code:
                raise unittest.SkipTest(skip.message)
        self.assertEqual(
            first_process.exitcode,
            0,
            msg="Expected zero exit code but got {}".format(first_process.exitcode)
        )

    @property
    def is_master(self) -> bool:
        return self.rank == 0