Remove unnecessary const_casts (#121225)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/121225
Approved by: https://github.com/soulitzer
This commit is contained in:
parent 85c807b3fd
commit 6ecd65886a
@@ -283,12 +283,8 @@ DLManagedTensor* toDLPack(const Tensor& src) {
   atDLMTensor->tensor.dl_tensor.device = getDLDevice(src, device_id);
   atDLMTensor->tensor.dl_tensor.ndim = src.dim();
   atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src);
-  atDLMTensor->tensor.dl_tensor.shape =
-      // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-      const_cast<int64_t*>(view.sizes().data());
-  atDLMTensor->tensor.dl_tensor.strides =
-      // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-      const_cast<int64_t*>(view.strides().data());
+  atDLMTensor->tensor.dl_tensor.shape = view.sizes().data();
+  atDLMTensor->tensor.dl_tensor.strides = view.strides().data();
   atDLMTensor->tensor.dl_tensor.byte_offset = 0;
   return &(atDLMTensor->tensor);
 }
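The cast was only ever needed because the vendored DLTensor declared shape and strides as mutable int64_t* (fixed in the dlpack.h hunk below); view.sizes().data() and view.strides().data() return const int64_t*. A minimal self-contained sketch of the same assignment, with FakeDLTensor as an illustrative stand-in for the real struct:

#include <cstdint>
#include <vector>

// Illustrative stand-in for DLTensor's metadata fields (not the real type).
struct FakeDLTensor {
  const int64_t* shape;   // was int64_t* before this commit
  const int64_t* strides;
};

int main() {
  const std::vector<int64_t> sizes{2, 3};
  const std::vector<int64_t> strides{3, 1};
  FakeDLTensor t{};
  t.shape = sizes.data();     // const int64_t* to const int64_t*: no const_cast
  t.strides = strides.data();
  return 0;
}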
@@ -137,7 +137,7 @@ bool CUDAHooks::isPinnedPtr(const void* data) const {
   cudaPointerAttributes attr;
   // We do not believe that CUDA needs mutable access to the data
   // here.
-  cudaError_t err = cudaPointerGetAttributes(&attr, const_cast<void*>(data));
+  cudaError_t err = cudaPointerGetAttributes(&attr, data);
 #if !defined(USE_ROCM)
   if (err == cudaErrorInvalidValue) {
     (void)cudaGetLastError(); // clear CUDA error
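The modern CUDA runtime declares cudaPointerGetAttributes(cudaPointerAttributes*, const void*), so a const pointer passes straight through. A hedged sketch of a pinned-memory check in that style; isPinned is a hypothetical helper, and the attr.type / cudaMemoryTypeHost fields assume a recent CUDA toolkit:

#include <cuda_runtime.h>

// Sketch only: returns true if `data` appears to be pinned host memory.
bool isPinned(const void* data) {
  cudaPointerAttributes attr;
  cudaError_t err = cudaPointerGetAttributes(&attr, data); // no cast needed
  if (err == cudaErrorInvalidValue) {
    (void)cudaGetLastError(); // clear the sticky error state
    return false;
  }
  return err == cudaSuccess && attr.type == cudaMemoryTypeHost;
}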
@@ -195,12 +195,12 @@ typedef struct {
   /*! \brief The data type of the pointer*/
   DLDataType dtype;
   /*! \brief The shape of the tensor */
-  int64_t* shape;
+  const int64_t* shape;
   /*!
    * \brief strides of the tensor (in number of elements, not bytes)
    * can be NULL, indicating tensor is compact and row-majored.
    */
-  int64_t* strides;
+  const int64_t* strides;
   /*! \brief The offset in bytes to the beginning pointer to data */
   uint64_t byte_offset;
 } DLTensor;
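Const-qualifying these members is safe for consumers because DLPack readers only ever inspect shape and strides; nothing in the protocol writes through them. A small consumer-side sketch (numel is a hypothetical helper, not part of DLPack):

#include <cstdint>
#include <cstdio>

// Computes the element count from const shape metadata; no mutation required.
int64_t numel(const int64_t* shape, int32_t ndim) {
  int64_t n = 1;
  for (int32_t i = 0; i < ndim; ++i) {
    n *= shape[i];
  }
  return n;
}

int main() {
  const int64_t shape[] = {2, 3, 4};
  std::printf("%lld\n", static_cast<long long>(numel(shape, 3))); // prints 24
  return 0;
}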
@@ -104,7 +104,7 @@ inline void throw_error_for_complex_autograd(
 
 // TODO: Blegh, bare references
 
-inline void rebase_history(Variable& var, std::shared_ptr<Node> grad_fn) {
+inline void rebase_history(const Variable& var, std::shared_ptr<Node> grad_fn) {
   if (grad_fn && var.defined()) {
     grad_fn->add_input_metadata(var);
     impl::rebase_history(var, {std::move(grad_fn), 0});
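Taking var by const reference works because at::Tensor/Variable is a reference-counted handle: const applies to the handle, not to the heap TensorImpl behind it, so autograd bookkeeping like add_input_metadata stays possible (shallow const). A minimal sketch of that semantics with stand-in types (Impl and Handle are illustrative, not PyTorch classes):

#include <memory>

struct Impl {
  int version = 0;
};

// A pointer-like handle: a const Handle still allows mutating *impl.
struct Handle {
  std::shared_ptr<Impl> impl = std::make_shared<Impl>();
  void bump_version() const { impl->version++; } // legal through a const handle
};

int main() {
  const Handle h;
  h.bump_version(); // compiles: only the pointer is const, not the pointee
  return 0;
}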
@@ -184,8 +184,7 @@ static void basicAutogradNotImplementedFallbackImpl(
         // users typically call .backward() and backprop through
         // the entire program).
         if (t.is_view() && is_mutable_output) {
-          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-          auto& base = const_cast<at::TensorBase&>(t._base());
+          const auto& base = t._base();
           if (base.requires_grad()) {
             // Can only register_hook on tensors that require grad.
             base.register_hook([op_name](const at::TensorBase& grad) {
@@ -210,8 +209,7 @@ static void basicAutogradNotImplementedFallbackImpl(
         // rebase_history assumes single Tensor(a!) return, and in general
         // custom ops don't have a good in-place story.
         if (!is_mutable_output) {
-          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-          set_history(const_cast<at::Tensor&>(t), grad_fn);
+          set_history(t, grad_fn);
         }
       },
       stack,
@@ -418,11 +416,9 @@ static void autogradNotImplementedFallbackImpl(
       [&](size_t idx_tensor, size_t idx_ret, const at::Tensor& t) {
         if (isDifferentiableType(t.scalar_type())) {
           if (is_inplace_output[idx_ret]) {
-            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-            rebase_history(const_cast<at::Tensor&>(t), grad_fn);
+            rebase_history(t, grad_fn);
           } else {
-            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-            set_history(const_cast<at::Tensor&>(t), grad_fn);
+            set_history(t, grad_fn);
           }
         }
       },
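The three fallback hunks above each remove a const_cast together with its NOLINTNEXTLINE suppression for clang-tidy's cppcoreguidelines-pro-type-const-cast check (the check name comes from the suppressions themselves). The check exists because writing through a stripped-const reference is undefined behavior when the referent really is const; a minimal snippet of the kind it flags:

// What cppcoreguidelines-pro-type-const-cast is guarding against:
void demo(const int& x) {
  int& y = const_cast<int&>(x); // flagged by the check
  y = 1; // undefined behavior if x refers to an object declared const
}

Passing t along as const at::Tensor& instead lets the compiler keep enforcing what these code paths already relied on: the tensor data is never written, only its autograd metadata.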
@@ -65,7 +65,7 @@ inline bool compute_requires_grad(Args&&... args) {
 }
 
 inline void set_history(
-    at::Tensor& variable,
+    const at::Tensor& variable,
     const std::shared_ptr<Node>& grad_fn) {
   TORCH_CHECK(grad_fn != nullptr);
   if (variable.defined()) {
@@ -81,15 +81,7 @@ inline void set_history(
 }
 
-inline void set_history(
-    std::vector<Variable>&& variables,
-    const std::shared_ptr<Node>& grad_fn) {
-  for (auto& variable : variables) {
-    set_history(variable, grad_fn);
-  }
-}
-
 inline void set_history(
-    std::vector<Variable>& variables,
+    const std::vector<Variable>& variables,
     const std::shared_ptr<Node>& grad_fn) {
   for (auto& variable : variables) {
     set_history(variable, grad_fn);
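With the element overload now taking const at::Tensor&, a single const std::vector<Variable>& overload serves every caller: in C++ a const lvalue reference binds to lvalues and rvalues alike, which is why the rvalue-reference overload above could be deleted outright rather than merely const-qualified. A self-contained illustration (take is a hypothetical stand-in):

#include <utility>
#include <vector>

void take(const std::vector<int>& v) { /* read-only use of v */ }

int main() {
  std::vector<int> v{1, 2, 3};
  take(v);                      // lvalue binds to const&
  take(std::vector<int>{4, 5}); // rvalue also binds to const&
  take(std::move(v));           // even an xvalue binds; no && overload needed
  return 0;
}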
@@ -152,8 +152,7 @@ std::tuple<tensorpipe::Message, TensorpipeWriteBuffers> tensorpipeSerialize(
   buffers.payload = std::move(rpcMessage->payload());
   // TensorPipe uses the same Message class for both reading and writing, thus
   // it uses non-const pointers even though it doesn't modify them when writing.
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
-  char* payloadPtr = const_cast<char*>(buffers.payload.data());
+  char* payloadPtr = buffers.payload.data();
   // kTpMessagePayloadIdx = 2
   tpMessage.payloads.push_back(
       tensorpipe::Message::Payload{payloadPtr, buffers.payload.size()});
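buffers.payload is a non-const std::vector<char> here, and std::vector provides a non-const data() overload returning char*, so the const_cast was a no-op; the const overload is only selected on a const vector. Sketch:

#include <vector>

int main() {
  std::vector<char> payload{'h', 'i'};
  char* p = payload.data(); // non-const overload: already char*, no cast
  p[0] = 'H';               // writing through it is fine on a non-const vector
  return 0;
}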