Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00
Avoid computing AutogradKey if not needed. (#46252)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/46252

Test Plan: CI

Reviewed By: ngimel

Differential Revision: D24272744

fbshipit-source-id: 6cb66d13e6c910df1ad1a8badd43f990e7b55368
This commit is contained in:
parent ac245f6b45
commit a37f2749cd
@@ -497,10 +497,10 @@ DispatchKeySet _dk_set = c10::DispatchKeySet({dispatch_key}) | c10::detail::mult
     .findSchemaOrThrow("aten::{f.func.name.name}", "{f.func.name.overload_name}")
     .typed<{dispatcher_sig.type()}>();
   {compute_dk}
-  DispatchKey _autograd_dk = c10::getAutogradKeyFromBackend(_dk);
   // This trick allows calling Autograd backend kernel first and then backend kernel,
   // without adding another AutogradBackendSelect dispatch key.
-  DispatchKey _current_dk = at::impl::variable_excluded_from_dispatch() ? _dk : _autograd_dk;
+  DispatchKey _current_dk = at::impl::variable_excluded_from_dispatch()
+      ? _dk : c10::getAutogradKeyFromBackend(_dk);
   return op.callWithDispatchKey(_current_dk, {', '.join(a.expr for a in dispatcher_exprs)});
 }}
 """
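For illustration only, below is a minimal sketch of the C++ this template might expand to after the change, for a hypothetical operator aten::foo(Tensor self) -> Tensor. The operator name, the placeholder standing in for {compute_dk}, and the include list are assumptions, not actual generated output; the final ternary is the part that mirrors the diff above, where c10::getAutogradKeyFromBackend(_dk) is now evaluated only on the branch that actually needs the Autograd key instead of being computed up front.

// Hypothetical expansion of the template above (sketch, not actual generated
// output). Headers are approximate.
#include <ATen/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/LegacyTypeDispatch.h>

at::Tensor foo(const at::Tensor & self) {
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::foo", "")
      .typed<at::Tensor (const at::Tensor &)>();
  // Stand-in for the {compute_dk} expansion: assume it produced the backend
  // dispatch key for the input tensor.
  c10::DispatchKey _dk = c10::DispatchKey::CPU;  // placeholder; normally derived from `self`
  // This trick allows calling the Autograd backend kernel first and then the
  // backend kernel, without adding another AutogradBackendSelect dispatch key.
  // After this commit, getAutogradKeyFromBackend runs only when the Autograd
  // key is actually selected.
  c10::DispatchKey _current_dk = at::impl::variable_excluded_from_dispatch()
      ? _dk : c10::getAutogradKeyFromBackend(_dk);
  return op.callWithDispatchKey(_current_dk, self);
}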