Fixup: rename BatchedTensorKey to Batched (#38798)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/38798

This makes it more in-line with the other keys in the file
(DispatchKey.h).

Test Plan: Imported from OSS

Differential Revision: D21691789

Pulled By: zou3519

fbshipit-source-id: 8d8b902360c0238f67bd0e58f9d969cec4b63320
This commit is contained in:
Richard Zou 2020-05-28 13:44:52 -07:00 committed by Facebook GitHub Bot
parent e029d678b6
commit d26f7f09b5
4 changed files with 5 additions and 3 deletions

View File

@@ -7,7 +7,7 @@ namespace at {
BatchedTensorImpl::BatchedTensorImpl(Tensor value, BatchDims bdims)
: TensorImpl(
-      c10::DispatchKeySet(DispatchKey::BatchedTensorKey),
+      c10::DispatchKeySet(DispatchKey::Batched),
value.dtype(),
value.device()
)

View File

@@ -84,7 +84,7 @@ struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
};
inline bool isBatched(const Tensor& tensor) {
-  return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::BatchedTensorKey);
+  return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::Batched);
}
// It is unsafe to call this on a Tensor that is not backed by a

View File

@@ -40,6 +40,8 @@ const char* toString(DispatchKey t) {
return "Autograd";
case DispatchKey::BackendSelect:
return "BackendSelect";
+    case DispatchKey::Batched:
+      return "Batched";
case DispatchKey::TESTING_ONLY_GenericMode:
return "TESTING_ONLY_GenericMode";
case DispatchKey::Autocast:

View File

@@ -154,7 +154,7 @@ enum class DispatchKey : uint8_t {
// This is the dispatch key for BatchedTensorImpl, which is used to implement
// batching rules for vmap.
-  BatchedTensorKey,
+  Batched,
// TESTING: This is intended to be a generic testing tensor type id.
// Don't use it for anything real; its only acceptable use is within a single