Remove unnecessary const_casts (#121225)

Removes `const_cast`s that are no longer necessary: the DLPack `DLTensor` shape/strides pointers, `cudaPointerGetAttributes`, and the autograd `set_history`/`rebase_history` helpers now accept const-qualified arguments directly.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/121225
Approved by: https://github.com/soulitzer
This commit is contained in:
cyy 2024-03-05 17:34:19 +00:00 committed by PyTorch MergeBot
parent 85c807b3fd
commit 6ecd65886a
7 changed files with 13 additions and 30 deletions

View File

@@ -283,12 +283,8 @@ DLManagedTensor* toDLPack(const Tensor& src) {
   atDLMTensor->tensor.dl_tensor.device = getDLDevice(src, device_id);
   atDLMTensor->tensor.dl_tensor.ndim = src.dim();
   atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src);
-  atDLMTensor->tensor.dl_tensor.shape =
-      // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-      const_cast<int64_t*>(view.sizes().data());
-  atDLMTensor->tensor.dl_tensor.strides =
-      // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-      const_cast<int64_t*>(view.strides().data());
+  atDLMTensor->tensor.dl_tensor.shape = view.sizes().data();
+  atDLMTensor->tensor.dl_tensor.strides = view.strides().data();
   atDLMTensor->tensor.dl_tensor.byte_offset = 0;
   return &(atDLMTensor->tensor);
 }
}

View File

@@ -137,7 +137,7 @@ bool CUDAHooks::isPinnedPtr(const void* data) const {
   cudaPointerAttributes attr;
   // We do not believe that CUDA needs mutable access to the data
   // here.
-  cudaError_t err = cudaPointerGetAttributes(&attr, const_cast<void*>(data));
+  cudaError_t err = cudaPointerGetAttributes(&attr, data);
 #if !defined(USE_ROCM)
   if (err == cudaErrorInvalidValue) {
     (void)cudaGetLastError(); // clear CUDA error

View File

@@ -195,12 +195,12 @@ typedef struct {
   /*! \brief The data type of the pointer*/
   DLDataType dtype;
   /*! \brief The shape of the tensor */
-  int64_t* shape;
+  const int64_t* shape;
   /*!
    * \brief strides of the tensor (in number of elements, not bytes)
    * can be NULL, indicating tensor is compact and row-majored.
    */
-  int64_t* strides;
+  const int64_t* strides;
   /*! \brief The offset in bytes to the beginning pointer to data */
   uint64_t byte_offset;
 } DLTensor;

View File

@@ -104,7 +104,7 @@ inline void throw_error_for_complex_autograd(
 // TODO: Blegh, bare references
-inline void rebase_history(Variable& var, std::shared_ptr<Node> grad_fn) {
+inline void rebase_history(const Variable& var, std::shared_ptr<Node> grad_fn) {
   if (grad_fn && var.defined()) {
     grad_fn->add_input_metadata(var);
     impl::rebase_history(var, {std::move(grad_fn), 0});

View File

@@ -184,8 +184,7 @@ static void basicAutogradNotImplementedFallbackImpl(
         // users typically call .backward() and backprop through
         // the entire program).
         if (t.is_view() && is_mutable_output) {
-          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-          auto& base = const_cast<at::TensorBase&>(t._base());
+          const auto& base = t._base();
           if (base.requires_grad()) {
             // Can only register_hook on tensors that require grad.
             base.register_hook([op_name](const at::TensorBase& grad) {
@@ -210,8 +209,7 @@ static void basicAutogradNotImplementedFallbackImpl(
         // rebase_history assumes single Tensor(a!) return, and in general
         // custom ops don't have a good in-place story.
         if (!is_mutable_output) {
-          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-          set_history(const_cast<at::Tensor&>(t), grad_fn);
+          set_history(t, grad_fn);
         }
       },
       stack,
@@ -418,11 +416,9 @@ static void autogradNotImplementedFallbackImpl(
       [&](size_t idx_tensor, size_t idx_ret, const at::Tensor& t) {
         if (isDifferentiableType(t.scalar_type())) {
           if (is_inplace_output[idx_ret]) {
-            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-            rebase_history(const_cast<at::Tensor&>(t), grad_fn);
+            rebase_history(t, grad_fn);
           } else {
-            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-            set_history(const_cast<at::Tensor&>(t), grad_fn);
+            set_history(t, grad_fn);
           }
         }
       },

View File

@@ -65,7 +65,7 @@ inline bool compute_requires_grad(Args&&... args) {
 }
 inline void set_history(
-    at::Tensor& variable,
+    const at::Tensor& variable,
     const std::shared_ptr<Node>& grad_fn) {
   TORCH_CHECK(grad_fn != nullptr);
   if (variable.defined()) {
@@ -81,15 +81,7 @@ inline void set_history(
 }
-inline void set_history(
-    std::vector<Variable>&& variables,
-    const std::shared_ptr<Node>& grad_fn) {
-  for (auto& variable : variables) {
-    set_history(variable, grad_fn);
-  }
-}
 inline void set_history(
-    std::vector<Variable>& variables,
+    const std::vector<Variable>& variables,
     const std::shared_ptr<Node>& grad_fn) {
   for (auto& variable : variables) {
     set_history(variable, grad_fn);

View File

@@ -152,8 +152,7 @@ std::tuple<tensorpipe::Message, TensorpipeWriteBuffers> tensorpipeSerialize(
   buffers.payload = std::move(rpcMessage->payload());
   // TensorPipe uses the same Message class for both reading and writing, thus
   // it uses non-const pointers even though it doesn't modify them when writing.
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-  char* payloadPtr = const_cast<char*>(buffers.payload.data());
+  char* payloadPtr = buffers.payload.data();
   // kTpMessagePayloadIdx = 2
   tpMessage.payloads.push_back(
       tensorpipe::Message::Payload{payloadPtr, buffers.payload.size()});