pytorch/torch/_dynamo/backends/torchxla.py
Jason Ansel 60e8c766b5 Refactor dynamo training backends (#93409)
This splits `training.py` into several files and moves the backends from `dynamo.optimizations.training` to `dynamo.backends.*`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/93409
Approved by: https://github.com/ezyang
2023-02-03 03:07:15 +00:00

40 lines · 975 B · Python

import logging

from ..backends.common import aot_autograd
from ..backends.registry import register_backend

log = logging.getLogger(__name__)


@register_backend
def torchxla_trivial(gm, fake_tensor_inputs):
    # Return the FX graph unchanged; execution falls through to the
    # regular torch_xla lazy-tensor path.
    return gm


@register_backend
def torchxla_trace_once(model, fake_tensor_inputs):
    import torch_xla.core.dynamo_bridge as bridge  # type: ignore[import]

    compiled_graph = None

    def fwd(*args):
        nonlocal model
        nonlocal compiled_graph
        if compiled_graph is None:
            # Compile through the torch_xla bridge on the first call only,
            # then drop the reference to the original model so it can be freed.
            compiled_graph = bridge.extract_compiled_graph(model, args)
            del model
        return compiled_graph(*args)

    return fwd


# Training-capable variants: wrap each forward compiler with AOTAutograd so
# the backward graph is captured and compiled as well.
aot_torchxla_trivial = aot_autograd(
    fw_compiler=torchxla_trivial,
)
register_backend(name="aot_torchxla_trivial", compiler_fn=aot_torchxla_trivial)

aot_torchxla_trace_once = aot_autograd(
    fw_compiler=torchxla_trace_once,
)
register_backend(
    name="aot_torchxla_trace_once", compiler_fn=aot_torchxla_trace_once
)