Set PYTHONHOME for inductor subprocesses using torch (#160008)
This is needed for subprocesses that call back into torch functionality, i.e. anything that is also setting `PYTHONPATH`. If they are part of an application that bundles the Python runtime, they should use that bundled runtime so their view of the world stays consistent with the parent process. There are other `sys.executable` subprocesses in torch/, but those appear to be fine as-is.

A previous attempt, https://github.com/pytorch/pytorch/pull/159382, was reverted because it caused macOS jobs on GitHub to time out: inductor subprocesses were scheduling C++ compilation tasks that failed to find the Python.h header. With PYTHONHOME set, those subprocesses ran inside venvs and looked for the CPython headers inside the venv, where the headers do not exist. This PR therefore gates the new behavior to internal builds only.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160008
Approved by: https://github.com/aorenste
This commit is contained in:
parent 0d3461bac0
commit fdfd69bb05
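For context, here is a minimal, self-contained sketch of the approach the change takes: build the subprocess environment once, pin `PYTHONPATH` so the child can import the same torch as the parent, and pin `PYTHONHOME` only when the runtime is bundled. This is an illustration, not the PR's code; the `BUNDLED_RUNTIME` flag and `subprocess_env` name are stand-ins for the internal-build gate (`config.is_fbcode()`) and the `python_subprocess_env()` helper added in the diff below.

```python
import os
import subprocess
import sys
import sysconfig

# Stand-in for the internal-build check (config.is_fbcode() in the real change).
BUNDLED_RUNTIME = False


def subprocess_env() -> dict[str, str]:
    # Start from the parent's environment so nothing else changes.
    env = {
        **os.environ,
        # Pin PYTHONPATH so the child can import the same torch as the parent.
        "PYTHONPATH": os.environ.get(
            "TORCH_CUSTOM_PYTHONPATH", os.pathsep.join(sys.path)
        ),
    }
    if BUNDLED_RUNTIME:
        # Only for builds that bundle the Python runtime: point the child at
        # that runtime's prefix. Skipped otherwise, since a venv does not
        # contain the CPython headers the child may need for C++ compilation.
        env["PYTHONHOME"] = sysconfig.get_path("data")
    return env


if __name__ == "__main__":
    # The child reports which runtime prefix it resolved.
    subprocess.run(
        [sys.executable, "-c", "import sys; print(sys.prefix)"],
        env=subprocess_env(),
        check=True,
    )
```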
@@ -31,7 +31,12 @@ from torch._inductor.codecache import (
     get_hash,
     PyCodeCache,
 )
-from torch._inductor.utils import get_gpu_type, get_ld_library_path, is_gpu
+from torch._inductor.utils import (
+    get_gpu_type,
+    get_ld_library_path,
+    is_gpu,
+    python_subprocess_env,
+)
 from torch._logging import getArtifactLogger
 from torch.utils._ordered_set import OrderedSet

@@ -123,11 +128,8 @@ class TuningProcess:
             f"--read-fd={str(subproc_read_fd)}",
             f"--write-fd={str(subproc_write_fd)}",
         ]
-        extra_env = {
-            # We need to set the PYTHONPATH so the subprocess can find torch.
-            "PYTHONPATH": os.environ.get(
-                "TORCH_CUSTOM_PYTHONPATH", os.pathsep.join(sys.path)
-            ),
+        env = {
+            **python_subprocess_env(),
             # We shouldn't be using the Triton async compile subprocess pool,
             # but as a precaution set the env var that disables its creation.
             "TORCH_WARM_POOL": "0",
@@ -139,10 +141,10 @@ class TuningProcess:
             else "0",
         }
         if self.device is not None:
-            extra_env[CUDA_VISIBLE_DEVICES] = str(self.device)
+            env[CUDA_VISIBLE_DEVICES] = str(self.device)
         self.process = subprocess.Popen(
             cmd,
-            env={**os.environ, **extra_env},
+            env=env,
             pass_fds=(subproc_read_fd, subproc_write_fd),
         )
         os.close(subproc_read_fd)
@@ -27,7 +27,7 @@ from torch._inductor.compile_worker.tracked_process_pool import (
     TrackedProcessPoolExecutor,
 )
 from torch._inductor.compile_worker.utils import _async_compile_initializer
-from torch._inductor.utils import get_ld_library_path
+from torch._inductor.utils import get_ld_library_path, python_subprocess_env


 log = logging.getLogger(__name__)
@@ -162,11 +162,7 @@ class SubprocPool:
         self.process = subprocess.Popen(
             cmd,
             env={
-                **os.environ,
-                # We need to set the PYTHONPATH so the subprocess can find torch.
-                "PYTHONPATH": os.environ.get(
-                    "TORCH_CUSTOM_PYTHONPATH", os.pathsep.join(sys.path)
-                ),
+                **python_subprocess_env(),
                 # Safeguard against creating a SubprocPool in the subprocess.
                 "TORCH_WARM_POOL": "0",
                 # Some internal usages need a modified LD_LIBRARY_PATH.
@@ -11,6 +11,7 @@ from typing import Any, Callable, Union

 import torch
 from torch._inductor import config
+from torch._inductor.utils import python_subprocess_env


 _IS_WINDOWS = sys.platform == "win32"
@@ -131,12 +132,7 @@ cdll.LoadLibrary("__lib_path__")
                 ],
                 cwd=output_dir,
                 stderr=subprocess.DEVNULL,
-                env={
-                    **os.environ,
-                    "PYTHONPATH": os.environ.get(
-                        "TORCH_CUSTOM_PYTHONPATH", os.pathsep.join(sys.path)
-                    ),
-                },
+                env=python_subprocess_env(),
             )
         except Exception:
             return False
@@ -18,6 +18,7 @@ import re
 import shutil
 import statistics
 import sys
+import sysconfig
 import tempfile
 import textwrap
 import time
@@ -3531,3 +3532,30 @@ def maybe_log_cudagraph_partition(
             warning_msg = f"{warning_msg}. Found from : \n {stack_trace}"

         perf_hint_log.warning(warning_msg)
+
+
+def python_subprocess_env() -> dict[str, str]:
+    """
+    Get a base environment for running Python subprocesses.
+    """
+
+    env = {
+        # Inherit the environment of the current process.
+        **os.environ,
+        # Set the PYTHONPATH so the subprocess can find torch.
+        "PYTHONPATH": os.environ.get(
+            "TORCH_CUSTOM_PYTHONPATH", os.pathsep.join(sys.path)
+        ),
+    }
+
+    # Set PYTHONHOME for internal builds, to account for builds that bundle the
+    # runtime. Otherwise they will use the libraries and headers from the
+    # platform runtime instead.
+    #
+    # This can't be done for external builds. The process can be run from a
+    # venv and that won't include Python headers. The process needs to be able
+    # to search for and find the platform runtime.
+    if config.is_fbcode():
+        env["PYTHONHOME"] = sysconfig.get_path("data")
+
+    return env