* Use restat to reduce ninja rebuilding when running codegen. Usually, you're only working on one codegen file at a time, but in our old behavior, editing one would induce a rebuild of everything that depended on ANY generated file. We fix this in two steps:
  - Don't write the file (updating the timestamp) when the contents are unchanged. (I had to update three separate places; shared Python library for build tools when?!)
  - Use the 'restat' ninja feature to avoid rebuilding when the timestamp doesn't change.
  Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* lintfix
  Signed-off-by: Edward Z. Yang <ezyang@fb.com>
* lintfix2
  Signed-off-by: Edward Z. Yang <ezyang@fb.com>
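The first of the two steps above (skip rewriting an output whose contents have not changed, so its mtime stays put) can be sketched roughly as follows. This is a minimal illustration only; write_if_changed is an assumed name, not the actual helper used in the PyTorch build scripts.

import os

def write_if_changed(path, new_contents):
    # Compare against the existing file (if any) and only write when the
    # generated text differs, so the mtime is left alone on no-op runs.
    if os.path.isfile(path):
        with open(path, 'r') as f:
            if f.read() == new_contents:
                return
    with open(path, 'w') as f:
        f.write(new_contents)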
109 lines
4.1 KiB
Python
import os
import sys
import glob

source_files = set(['.py', '.cpp', '.h'])


# TODO: This is a little inaccurate, because it will also pick
# up setup_helper scripts which don't affect code generation
def all_generator_source():
    r = []
    for directory, _, filenames in os.walk('tools'):
        for f in filenames:
            if os.path.splitext(f)[1] in source_files:
                full = os.path.join(directory, f)
                r.append(full)
    return sorted(r)

inputs = [
    'torch/csrc/generic/TensorMethods.cwrap',
    'torch/lib/tmp_install/share/ATen/Declarations.yaml',
    'tools/autograd/derivatives.yaml',
] + glob.glob('torch/csrc/generic/methods/*.cwrap')

outputs = [
    'torch/csrc/autograd/generated/Functions.cpp',
    'torch/csrc/autograd/generated/Functions.h',
    'torch/csrc/autograd/generated/python_functions.cpp',
    'torch/csrc/autograd/generated/python_functions.h',
    'torch/csrc/autograd/generated/python_nn_functions.cpp',
    'torch/csrc/autograd/generated/python_nn_functions.h',
    'torch/csrc/autograd/generated/python_nn_functions_dispatch.h',
    'torch/csrc/autograd/generated/python_variable_methods.cpp',
    'torch/csrc/autograd/generated/python_variable_methods_dispatch.h',
    'torch/csrc/autograd/generated/VariableType.cpp',
    'torch/csrc/autograd/generated/VariableType.h',
    'torch/csrc/jit/generated/aten_dispatch.cpp',
    'torch/csrc/jit/generated/aten_dispatch.h',
]

def generate_code_ninja(w):
    all_inputs = all_generator_source() + inputs
    cmd = "{} {}".format(sys.executable, 'tools/setup_helpers/generate_code.py')
    w.writer.build(
        outputs, 'do_cmd', all_inputs,
        variables={
            'cmd': cmd,
            # Note [Unchanging results for ninja]
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # generate_code.py will avoid bumping the timestamp on its
            # output files if the contents of the generated file did not
            # change. To let Ninja take advantage of this, it must stat
            # the output files after the build. See
            # https://groups.google.com/forum/#!topic/ninja-build/rExDmgDL2oc
            # for a more detailed discussion.
            'restat': True,
        })
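
# For illustration only (an assumption about what the emitted build statement
# looks like, not output copied from an actual build): assuming a 'do_cmd'
# rule defined elsewhere that runs $cmd, the statement written above should
# come out roughly as
#
#   build torch/csrc/autograd/generated/Functions.cpp ... : do_cmd <all_inputs>
#     cmd = /path/to/python tools/setup_helpers/generate_code.py
#     restat = True
#
# Because 'restat' is set, ninja re-stats the outputs after the command runs
# and skips rebuilding downstream targets whose inputs' mtimes are unchanged.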


def generate_code(ninja_global=None):
    # if ninja is enabled, we just register this file as something
    # ninja will need to call if needed
    if ninja_global is not None:
        return generate_code_ninja(ninja_global)

    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.cwrap import cwrap
    from tools.cwrap.plugins.THPPlugin import THPPlugin
    from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
    from tools.cwrap.plugins.AutoGPU import AutoGPU
    from tools.cwrap.plugins.BoolOption import BoolOption
    from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
    from tools.cwrap.plugins.NullableArguments import NullableArguments
    from tools.cwrap.plugins.WrapDim import WrapDim
    from tools.cwrap.plugins.AssertNDim import AssertNDim
    from tools.cwrap.plugins.Broadcast import Broadcast
    from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
    from tools.autograd.gen_autograd import gen_autograd
    from tools.jit.gen_jit_dispatch import gen_jit_dispatch
    thp_plugin = THPPlugin()

    cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
        ProcessorSpecificPlugin(), BoolOption(), thp_plugin,
        AutoGPU(condition='IS_CUDA'), ArgcountSortPlugin(), KwargsPlugin(),
        AssertNDim(), WrapDim(), Broadcast()
    ])

    # Build ATen based Variable classes
    autograd_gen_dir = 'torch/csrc/autograd/generated'
    jit_gen_dir = 'torch/csrc/jit/generated'
    for d in (autograd_gen_dir, jit_gen_dir):
        if not os.path.exists(d):
            os.mkdir(d)
    gen_autograd(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        autograd_gen_dir)
    gen_jit_dispatch(
        'torch/lib/tmp_install/share/ATen/Declarations.yaml',
        jit_gen_dir)


# called from ninja
if __name__ == "__main__":
    generate_code(None)