Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 00:21:07 +01:00
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/50458

libinterpreter.so contains a frozen python distribution including the torch-python bindings. Freezing refers to serializing the bytecode of the python standard library modules, as well as the torch python library, and embedding them in the library code. This library can then be dlopened multiple times in one process context, with each interpreter having its own python state and GIL. In addition, each python environment is sealed off from the filesystem and can only import the frozen modules included in the distribution.

This change relies on the newly added frozenpython, a cpython 3.8.6 fork built for this purpose. Frozenpython provides libpython3.8-frozen.a, which contains frozen bytecode and object code for the python standard library. Building on top of frozen python, this diff adds the frozen torch-python bindings, providing each embedded interpreter with its own copy of the torch bindings. Each interpreter is intended to share one instance of libtorch and the underlying tensor libraries.

Known issues:
- Autograd is not expected to work with the embedded interpreter currently, as it manages its own python interactions and would need to coordinate with the duplicated python states in each of the interpreters.
- Distributed and CUDA support are disabled in the libinterpreter.so build and need to be revisited.
- __file__ is not supported in the context of embedded python, since there are no files backing the library modules; code that uses __file__ will not work as-is.
- __version__ is not properly supported in the embedded torch-python; there is only a workaround for now.

Test Plan: tested locally and on CI with cmake and buck builds running the torch::deploy interpreter_test

Reviewed By: ailzhang

Differential Revision: D25850783

fbshipit-source-id: a4656377caff25b73913daae7ae2f88bcab8fd88
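As a minimal sketch of the __file__ limitation noted above: code running under an embedded interpreter can detect torch::deploy through sys.executable and avoid relying on __file__, which is the same guard the file below uses. The helper names in_torch_deploy and module_dir_or_empty are hypothetical, introduced only for illustration; they are not part of the PyTorch API.

import os
import sys


def in_torch_deploy():
    # Hypothetical helper: inside a torch::deploy embedded interpreter the
    # executable name is reported as 'torch_deploy' (see the guard in the
    # file below).
    return sys.executable == 'torch_deploy'


def module_dir_or_empty(module):
    # Frozen modules have no backing .py file, so __file__ may be absent;
    # fall back to an empty string instead of raising AttributeError.
    module_file = getattr(module, '__file__', None)
    return os.path.dirname(module_file) if module_file else ""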
75 lines
2.3 KiB
Python
import os
import inspect
import sys
import tempfile

# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.

if sys.executable == 'torch_deploy':
    # __file__ is meaningless in the context of frozen torch used in torch deploy.
    # setting empty torch_parent should allow below functions to operate without crashing,
    # but it's unclear if there is a valid use case for them in the context of deploy.
    torch_parent = ""
else:
    if os.path.basename(os.path.dirname(__file__)) == 'shared':
        torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    else:
        torch_parent = os.path.dirname(os.path.dirname(__file__))


def get_file_path(*path_components):
    return os.path.join(torch_parent, *path_components)


def get_file_path_2(*path_components):
    return os.path.join(*path_components)


def get_writable_path(path):
    if os.access(path, os.W_OK):
        return path
    return tempfile.mkdtemp(suffix=os.path.basename(path))


def prepare_multiprocessing_environment(path):
    pass


def resolve_library_path(path):
    return os.path.realpath(path)


def get_source_lines_and_file(obj, error_msg=None):
    """
    Wrapper around inspect.getsourcelines and inspect.getsourcefile.

    Returns: (sourcelines, file_lineno, filename)
    """
    filename = None  # in case getsourcefile throws
    try:
        filename = inspect.getsourcefile(obj)
        sourcelines, file_lineno = inspect.getsourcelines(obj)
    except OSError as e:
        msg = (f"Can't get source for {obj}. TorchScript requires source access in "
               "order to carry out compilation, make sure original .py files are "
               "available.")
        if error_msg:
            msg += '\n' + error_msg
        raise OSError(msg) from e

    return sourcelines, file_lineno, filename


TEST_MASTER_ADDR = '127.0.0.1'
TEST_MASTER_PORT = 29500
# USE_GLOBAL_DEPS controls whether __init__.py tries to load
# libtorch_global_deps, see Note [Global dependencies]
USE_GLOBAL_DEPS = True
# USE_RTLD_GLOBAL_WITH_LIBTORCH controls whether __init__.py tries to load
# _C.so with RTLD_GLOBAL during the call to dlopen.
USE_RTLD_GLOBAL_WITH_LIBTORCH = False
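For reference, a hedged usage sketch of the helpers defined above, assuming the module is importable as torch._utils_internal (its location in the PyTorch source tree) and that a torch build is on the import path; demo() is a made-up function used only to exercise the API.

from torch._utils_internal import (
    get_file_path_2,
    get_source_lines_and_file,
    get_writable_path,
)


def demo():
    # Join path components without anchoring them at the torch parent directory.
    cfg = get_file_path_2("configs", "default.yaml")

    # Returns the path itself if it is writable, otherwise a fresh temp directory.
    cache_dir = get_writable_path("/usr/share/example-cache")

    # TorchScript-style source lookup; raises OSError with a descriptive message
    # when the .py source is unavailable (for example under torch::deploy).
    lines, lineno, filename = get_source_lines_and_file(demo)
    print(cfg, cache_dir, filename, lineno, len(lines))


if __name__ == "__main__":
    demo()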