Remove some stale xla dynamo backend (#122128)

`torchxla_trace_once` and `aot_torchxla_trivial` should be removed.

In our internal (hopefully the dashboard can be open-sourced soon) torchbench daily runs, the `openxla` backend has a much higher passing rate and similar performance to `openxla_eval` (the non-aot-autograd backend). We still use `openxla_eval` in the llama2 example, but I think we should move users to the `openxla` backend going forward.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/122128
Approved by: https://github.com/alanwaketan, https://github.com/jansel
This commit is contained in:
JackCaoG 2024-03-21 01:13:46 +00:00 committed by PyTorch MergeBot
parent c20cf97366
commit e38d60bc07

View File

@ -1,7 +1,6 @@
# mypy: ignore-errors
import logging
import warnings
from functorch.compile import make_boxed_func
@ -12,20 +11,6 @@ log = logging.getLogger(__name__)
@register_experimental_backend
def torchxla_trivial(gm, fake_tensor_inputs):
return gm
@register_experimental_backend
def torchxla_trace_once(model, fake_tensor_inputs):
warnings.warn(
"This backend will be deprecated in 2.2, please use `openxla` backend instead"
)
return xla_backend_helper(model, fake_tensor_inputs)
@register_backend
def openxla_eval(model, fake_tensor_inputs):
return xla_backend_helper(model, fake_tensor_inputs, boxed=False)
@ -55,20 +40,6 @@ def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
return make_boxed_func(fwd) if boxed else fwd
aot_torchxla_trivial = aot_autograd(
fw_compiler=torchxla_trivial,
)
register_experimental_backend(
name="aot_torchxla_trivial", compiler_fn=aot_torchxla_trivial
)
aot_torchxla_trace_once = aot_autograd(
fw_compiler=torchxla_trace_once,
)
register_experimental_backend(
name="aot_torchxla_trace_once", compiler_fn=aot_torchxla_trace_once
)
openxla = aot_autograd(
fw_compiler=openxla_eval_boxed,
)