Revert "[3/N] Add -Wdeprecated and related fixes (#109698)"
This reverts commit c31fcdaa4f.
Reverted https://github.com/pytorch/pytorch/pull/109698 on behalf of https://github.com/PaliC due to breaking quantization tests (quantization/test_quantize_per_channel_sub_byte and quantization/test_quantize_per_channel_float_qparams) internally ([comment](https://github.com/pytorch/pytorch/pull/109698#issuecomment-1746999806))
parent 5220d0dfaf
commit 156aefa89b
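
Most of the hunks below remove the explicitly defaulted copy/move special members and `= default` destructors that #109698 had introduced to silence `-Wdeprecated` warnings, restoring the previous definitions. For background, a minimal sketch (the class name is hypothetical, not from this diff) of the pattern those warnings push code toward:

// Illustrative only; "Base" is not a class from this diff.
struct Base {
  virtual ~Base() = default;  // user-declared destructor

  // Without these, using the implicit copy operations triggers
  // -Wdeprecated-copy-dtor, because implicitly generating them in a class
  // with a user-declared destructor is deprecated.
  Base() = default;
  Base(const Base&) = default;
  Base& operator=(const Base&) = default;
  Base(Base&&) noexcept = default;
  Base& operator=(Base&&) noexcept = default;
};
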
@@ -15,14 +15,12 @@ class VulkanPackedContext {
  public:
   VulkanPackedContext() : packed_{c10::AnyType::get()} {}
-  VulkanPackedContext(const VulkanPackedContext&) = default;
-  VulkanPackedContext(VulkanPackedContext&&) = default;

   inline const c10::IValue get_val(int64_t i) const {
     return packed_.get(i);
   }

-  inline void set_val(int64_t i, const c10::IValue& val) const {
+  inline void set_val(int64_t i, c10::IValue val) const {
     return packed_.set(i, val);
   }
@@ -62,14 +62,6 @@ namespace impl {
  * those uses will be devirtualized.
  */
 struct C10_API DeviceGuardImplInterface {
-  DeviceGuardImplInterface() = default;
-  DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
-  DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
-      default;
-  DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
-  DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
-      default;
-
   /**
    * Return the type of device managed by this guard implementation.
    */
@@ -22,11 +22,6 @@ class TORCH_API IMethod {
   using IValueList = std::vector<c10::IValue>;
   using IValueMap = std::unordered_map<std::string, at::IValue>;

-  IMethod() = default;
-  IMethod(const IMethod&) = default;
-  IMethod& operator=(const IMethod&) = default;
-  IMethod(IMethod&&) noexcept = default;
-  IMethod& operator=(IMethod&&) noexcept = default;
   virtual ~IMethod() = default;

   virtual c10::IValue operator()(
@@ -21,7 +21,7 @@ namespace nn {
 /// because then storing a module would always require templatizing it.
 template <typename Derived>
 // NOLINTNEXTLINE(bugprone-exception-escape)
-class Cloneable : public Module {
+class Cloneable : public virtual Module {
  public:
   using Module::Module;

@@ -90,7 +90,7 @@ class Cloneable : public Module {
         clone != nullptr,
         "Attempted to clone submodule, but it is of a "
         "different type than the submodule it was to be cloned into");
-    static_cast<Derived&>(*this) = *clone;
+    static_cast<Derived&>(*this) = std::move(*clone);
   }
 };
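
The switch back to `public virtual Module` here pairs with the `Cloneable(...)` → `Module(...)` constructor-delegation change in MultiheadAttentionImpl further down: with a virtual base, the most-derived class initializes that base directly and intermediate initializers are ignored. A minimal sketch of that language rule, using made-up names rather than the torch classes:

#include <iostream>
#include <string>
#include <utility>

struct ModuleBase {
  explicit ModuleBase(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

struct CloneableMixin : public virtual ModuleBase {
  // This initializer only takes effect when CloneableMixin is itself the
  // most-derived type; otherwise the virtual base is constructed elsewhere.
  CloneableMixin() : ModuleBase("from-mixin") {}
};

struct MyImpl : CloneableMixin {
  // The most-derived class constructs the virtual base directly, which is
  // why the restored code initializes Module("...") instead of Cloneable("...").
  MyImpl() : ModuleBase("MyImpl") {}
};

int main() {
  MyImpl m;
  std::cout << m.name_ << "\n";  // prints "MyImpl", not "from-mixin"
}
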
@@ -81,10 +81,6 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// The name of the submodule is inferred via RTTI (if possible) the first
   /// time `.name()` is invoked.
   Module();
-  Module(const Module&) = default;
-  Module& operator=(const Module&) = default;
-  Module(Module&&) noexcept = default;
-  Module& operator=(Module&&) noexcept = default;

   virtual ~Module() = default;

@@ -137,23 +137,7 @@ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
   }

   /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override {
-    stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
-           << this->options.num_features() << ", "
-           << "eps=" << this->options.eps() << ", "
-           << "momentum=";
-
-    if (this->options.momentum().has_value()) {
-      stream << this->options.momentum().value();
-    } else {
-      stream << "None";
-    }
-
-    stream << ", "
-           << "affine=" << this->options.affine() << ", "
-           << "track_running_stats=" << this->options.track_running_stats()
-           << ")";
-  }
+  void pretty_print(std::ostream& stream) const override;
 };

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d
@@ -91,8 +91,6 @@ class AnyValue {
   struct Placeholder {
     explicit Placeholder(const std::type_info& type_info_) noexcept
         : type_info(type_info_) {}
-    Placeholder(const Placeholder&) = default;
-    Placeholder(Placeholder&&) = default;
     virtual ~Placeholder() = default;
     virtual std::unique_ptr<Placeholder> clone() const {
       TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`");
@@ -45,15 +45,7 @@ class InstanceNormImpl
   }

   /// Pretty prints the `InstanceNorm{1,2,3}d` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override {
-    stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
-           << this->options.num_features() << ", "
-           << "eps=" << this->options.eps() << ", "
-           << "momentum=" << this->options.momentum() << ", "
-           << "affine=" << this->options.affine() << ", "
-           << "track_running_stats=" << this->options.track_running_stats()
-           << ")";
-  }
+  void pretty_print(std::ostream& stream) const override;
 };

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm1d
@@ -164,6 +164,8 @@ struct TORCH_API RNNCellOptionsBase {
       int64_t hidden_size,
       bool bias,
       int64_t num_chunks);
+  virtual ~RNNCellOptionsBase() = default;
+
   TORCH_ARG(int64_t, input_size);
   TORCH_ARG(int64_t, hidden_size);
   TORCH_ARG(bool, bias);
@@ -34,6 +34,7 @@ struct TORCH_API AdagradOptions
   TORCH_API friend bool operator==(
       const AdagradOptions& lhs,
       const AdagradOptions& rhs);
+  ~AdagradOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -44,16 +45,12 @@ struct TORCH_API AdagradParamState
   TORCH_ARG(int64_t, step) = 0;

  public:
-  AdagradParamState() = default;
-  AdagradParamState(const AdagradParamState&) = default;
-  AdagradParamState& operator=(const AdagradParamState&) = default;
-  AdagradParamState(AdagradParamState&&) noexcept = default;
-  AdagradParamState& operator=(AdagradParamState&&) noexcept = default;
   void serialize(torch::serialize::InputArchive& archive) override;
   void serialize(torch::serialize::OutputArchive& archive) const override;
   TORCH_API friend bool operator==(
       const AdagradParamState& lhs,
       const AdagradParamState& rhs);
+  ~AdagradParamState() override = default;
 };

 class TORCH_API Adagrad : public Optimizer {
@@ -32,6 +32,7 @@ struct TORCH_API AdamOptions : public OptimizerCloneableOptions<AdamOptions> {
   TORCH_API friend bool operator==(
       const AdamOptions& lhs,
       const AdamOptions& rhs);
+  ~AdamOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -49,6 +50,7 @@ struct TORCH_API AdamParamState
   TORCH_API friend bool operator==(
       const AdamParamState& lhs,
       const AdamParamState& rhs);
+  ~AdamParamState() override = default;
 };

 class TORCH_API Adam : public Optimizer {
@@ -32,6 +32,7 @@ struct TORCH_API AdamWOptions : public OptimizerCloneableOptions<AdamWOptions> {
   TORCH_API friend bool operator==(
       const AdamWOptions& lhs,
       const AdamWOptions& rhs);
+  ~AdamWOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -49,6 +50,7 @@ struct TORCH_API AdamWParamState
   TORCH_API friend bool operator==(
       const AdamWParamState& lhs,
       const AdamWParamState& rhs);
+  ~AdamWParamState() override = default;
 };

 class TORCH_API AdamW : public Optimizer {
@@ -29,6 +29,7 @@ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
   TORCH_API friend bool operator==(
       const LBFGSOptions& lhs,
       const LBFGSOptions& rhs);
+  ~LBFGSOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -53,6 +54,7 @@ struct TORCH_API LBFGSParamState
   TORCH_API friend bool operator==(
       const LBFGSParamState& lhs,
       const LBFGSParamState& rhs);
+  ~LBFGSParamState() override = default;
 };

 class TORCH_API LBFGS : public Optimizer {
@@ -34,11 +34,6 @@ namespace optim {

 class TORCH_API OptimizerParamState {
  public:
-  OptimizerParamState() = default;
-  OptimizerParamState(const OptimizerParamState&) = default;
-  OptimizerParamState& operator=(const OptimizerParamState&) = default;
-  OptimizerParamState(OptimizerParamState&&) noexcept = default;
-  OptimizerParamState& operator=(OptimizerParamState&&) noexcept = default;
   virtual std::unique_ptr<OptimizerParamState> clone() const;
   virtual void serialize(torch::serialize::InputArchive& archive);
   virtual void serialize(torch::serialize::OutputArchive& archive) const;
@@ -54,11 +49,6 @@ class OptimizerCloneableParamState : public OptimizerParamState {

 class TORCH_API OptimizerOptions {
  public:
-  OptimizerOptions() = default;
-  OptimizerOptions(const OptimizerOptions&) = default;
-  OptimizerOptions& operator=(const OptimizerOptions&) = default;
-  OptimizerOptions(OptimizerOptions&&) noexcept = default;
-  OptimizerOptions& operator=(OptimizerOptions&&) noexcept = default;
   virtual std::unique_ptr<OptimizerOptions> clone() const;
   virtual void serialize(torch::serialize::InputArchive& archive);
   virtual void serialize(torch::serialize::OutputArchive& archive) const;
@@ -37,6 +37,7 @@ struct TORCH_API RMSpropOptions
   TORCH_API friend bool operator==(
       const RMSpropOptions& lhs,
       const RMSpropOptions& rhs);
+  ~RMSpropOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -54,6 +55,7 @@ struct TORCH_API RMSpropParamState
   TORCH_API friend bool operator==(
       const RMSpropParamState& lhs,
       const RMSpropParamState& rhs);
+  ~RMSpropParamState() override = default;
 };

 class TORCH_API RMSprop : public Optimizer {
@@ -34,6 +34,7 @@ struct TORCH_API SGDOptions : public OptimizerCloneableOptions<SGDOptions> {
   TORCH_API friend bool operator==(
       const SGDOptions& lhs,
       const SGDOptions& rhs);
+  ~SGDOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -48,6 +49,7 @@ struct TORCH_API SGDParamState
   TORCH_API friend bool operator==(
       const SGDParamState& lhs,
       const SGDParamState& rhs);
+  ~SGDParamState() override = default;
 };

 class TORCH_API SGD : public Optimizer {
@@ -432,7 +432,7 @@ void ThresholdImpl::pretty_print(std::ostream& stream) const {

 MultiheadAttentionImpl::MultiheadAttentionImpl(
     const MultiheadAttentionOptions& options_)
-    : Cloneable("torch::nn::MultiheadAttention"), options(options_) {
+    : Module("torch::nn::MultiheadAttention"), options(options_) {
   // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
   reset();
 }
@@ -14,6 +14,25 @@
 namespace torch {
 namespace nn {

+template <size_t D, typename Derived>
+void BatchNormImplBase<D, Derived>::pretty_print(std::ostream& stream) const {
+  stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
+         << this->options.num_features() << ", "
+         << "eps=" << this->options.eps() << ", "
+         << "momentum=";
+
+  if (this->options.momentum().has_value()) {
+    stream << this->options.momentum().value();
+  } else {
+    stream << "None";
+  }
+
+  stream << ", "
+         << "affine=" << this->options.affine() << ", "
+         << "track_running_stats=" << this->options.track_running_stats()
+         << ")";
+}
+
 void BatchNorm1dImpl::_check_input_dim(const Tensor& input) {
   TORCH_CHECK(
       input.dim() == 2 || input.dim() == 3,
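
Moving `pretty_print` from an inline definition in the header back to a template definition in this .cpp only links if the translation unit also instantiates the template for every `D` that callers use, typically via explicit instantiation. The exact mechanism torch relies on is not visible in this diff; a minimal standalone sketch of the general pattern, with hypothetical names:

#include <iostream>
#include <ostream>

// Normally in the header: only the declaration of the member function.
template <int D>
struct Printer {
  void pretty_print(std::ostream& stream) const;
};

// Normally in the .cpp: the out-of-line template definition...
template <int D>
void Printer<D>::pretty_print(std::ostream& stream) const {
  stream << "Printer" << D << "d";
}

// ...plus explicit instantiation definitions, so the linker can resolve
// pretty_print for D = 1..3 even though the body is hidden from other TUs.
template struct Printer<1>;
template struct Printer<2>;
template struct Printer<3>;

int main() {
  Printer<2>{}.pretty_print(std::cout);
  std::cout << "\n";
}
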
@@ -4,6 +4,17 @@
 namespace torch {
 namespace nn {

+template <size_t D, typename Derived>
+void InstanceNormImpl<D, Derived>::pretty_print(std::ostream& stream) const {
+  stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
+         << this->options.num_features() << ", "
+         << "eps=" << this->options.eps() << ", "
+         << "momentum=" << this->options.momentum() << ", "
+         << "affine=" << this->options.affine() << ", "
+         << "track_running_stats=" << this->options.track_running_stats()
+         << ")";
+}
+
 void InstanceNorm1dImpl::_check_input_dim(const Tensor& input) {
   if (input.dim() != 3 && input.dim() != 2) {
     TORCH_CHECK(
@@ -28,9 +28,6 @@ class TORCH_API Store : public torch::CustomClassHolder {
   explicit Store(const std::chrono::milliseconds& timeout)
       : timeout_(timeout) {}

-  Store(const Store&) = default;
-  Store(Store&&) noexcept = default;
-
   ~Store() override = default;

   void set(const std::string& key, const std::string& value);
@@ -18,7 +18,6 @@ class PYBIND11_EXPORT PyRRef {
   // for more explanations.
   explicit PyRRef(const py::object& value, const py::object& type_hint);
   explicit PyRRef(c10::intrusive_ptr<RRef> rref);
-  PyRRef(const PyRRef&) = default;
   ~PyRRef();

   bool isOwner() const;
@@ -142,7 +142,8 @@ std::unique_ptr<RRefUserDelete> RRefUserDelete::fromMessage(
     const Message& message) {
   auto pair =
       ForkMessageBase::fromMessage(message, MessageType::RREF_USER_DELETE);
-  return std::make_unique<RRefUserDelete>(pair.first, pair.second);
+  return std::make_unique<RRefUserDelete>(
+      RRefUserDelete(pair.first, pair.second));
 }

 std::unique_ptr<RemoteRet> RemoteRet::fromMessage(const Message& message) {
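
Wrapping the arguments in an explicit `RRefUserDelete(...)` temporary before handing them to `make_unique` is a common workaround when a constructor is not publicly accessible: `std::make_unique` itself cannot call a protected constructor, but a static member like `fromMessage` can, and the temporary is then moved into the allocation. Whether that is the exact motivation in this file is an assumption; a minimal sketch of the pattern with a hypothetical class:

#include <memory>

class Widget {
 public:
  Widget(Widget&&) = default;  // public move constructor

  static std::unique_ptr<Widget> create(int id) {
    // make_unique<Widget>(id) would not compile here: the int constructor is
    // protected and make_unique is not a friend. Building the temporary in a
    // member function (which does have access) and moving it works.
    return std::make_unique<Widget>(Widget(id));
  }

 protected:
  explicit Widget(int id) : id_(id) {}

 private:
  int id_;
};

int main() {
  auto w = Widget::create(42);
  (void)w;
}
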
@@ -161,9 +161,11 @@ C10_DEFINE_REGISTRY_WITHOUT_WARNING(

 const std::string& TensorPipeAgent::guessAddress() {
   static const std::string uvAddress = []() {
+    tensorpipe::Error error;
+    std::string result;
     char* ifnameEnv = std::getenv(kSocketIfnameEnvVar.c_str());
     if (ifnameEnv != nullptr) {
-      auto [error, result] =
+      std::tie(error, result) =
           tensorpipe::transport::uv::lookupAddrForIface(ifnameEnv);
       if (error) {
         LOG(WARNING) << "Failed to look up the IP address for interface "
@@ -171,13 +173,15 @@ const std::string& TensorPipeAgent::guessAddress() {
                      << kDefaultUvAddress;
         return kDefaultUvAddress;
       }
       return result;
-    }
-    auto [error, result] = tensorpipe::transport::uv::lookupAddrForHostname();
-    if (error) {
-      LOG(WARNING) << "Failed to look up the IP address for the hostname ("
-                   << error.what() << "), defaulting to " << kDefaultUvAddress;
-      return kDefaultUvAddress;
+    } else {
+      std::tie(error, result) =
+          tensorpipe::transport::uv::lookupAddrForHostname();
+      if (error) {
+        LOG(WARNING) << "Failed to look up the IP address for the hostname ("
+                     << error.what() << "), defaulting to "
+                     << kDefaultUvAddress;
+        return kDefaultUvAddress;
+      }
     }
     return result;
   }();
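
The `auto [error, result] = ...` → `std::tie(error, result) = ...` change restores the pre-declared-variable style: structured bindings always introduce new names and cannot assign into existing variables, while `std::tie` does exactly that and, here, lets both branches share the single `error`/`result` pair declared at the top of the lambda. A small illustration with a made-up lookup function:

#include <string>
#include <tuple>
#include <utility>

std::pair<int, std::string> lookup() { return {0, "127.0.0.1"}; }

int main() {
  // Structured bindings: declares brand-new variables err/addr.
  auto [err, addr] = lookup();

  // std::tie: assigns into variables declared earlier, which is what the
  // restored code needs when two branches fill the same pair.
  int error = -1;
  std::string result;
  std::tie(error, result) = lookup();

  return (err == error && addr == result) ? 0 : 1;
}
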
@@ -1222,8 +1226,8 @@ const std::string& TensorPipeAgent::findWorkerURL(

 void TensorPipeAgent::updateGroupMembership(
     const WorkerInfo& workerInfo,
-    const std::vector<c10::Device>& devices,
-    const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
+    const std::vector<c10::Device> devices,
+    const std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
     bool isJoin) {
   std::string name = workerInfo.name_;
   worker_id_t id = workerInfo.id_;
@@ -194,8 +194,8 @@ class TORCH_API TensorPipeAgent : public RpcAgent {
   std::vector<WorkerInfo> getWorkerInfos() const override;
   void updateGroupMembership(
       const WorkerInfo& workerInfo,
-      const std::vector<c10::Device>& devices,
-      const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
+      const std::vector<c10::Device> devices,
+      const std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
       bool isJoin);

   std::unordered_map<std::string, std::string> getMetrics() override;
@@ -22,6 +22,7 @@ class SGDParamState {
         static_cast<const SGDParamState&>(*this));
   }
   friend bool operator==(const SGDParamState& lhs, const SGDParamState& rhs);
+  ~SGDParamState() = default;
 };

 struct TORCH_API SGDOptions {
@@ -39,6 +40,7 @@ struct TORCH_API SGDOptions {
   TORCH_API friend bool operator==(
       const SGDOptions& lhs,
       const SGDOptions& rhs);
+  ~SGDOptions() = default;
 };

 /// Stores parameters in the param_group and stores a pointer to the SGDOptions
@@ -1100,6 +1100,7 @@ Code::Code(
           remaining_bailout_depth)) {}

 Code::Code(CodeImpl* codeImpl) : pImpl(codeImpl) {}
+Code::~Code() = default;

 MobileCode::MobileCode(
     const std::shared_ptr<Graph>& graph,
@@ -1116,6 +1117,8 @@ MobileCode::MobileCode(
           emit_promoted_ops,
           remaining_bailout_depth)) {}

+MobileCode::~MobileCode() = default;
+
 const std::vector<GraphExecutor*>& Code::grad_executors() {
   return pImpl->grad_executors();
 }
@@ -1169,6 +1172,7 @@ InterpreterState::InterpreterState(const Code& code, TaskLauncher taskLauncher)
     : pImpl(c10::make_intrusive<InterpreterStateImpl>(
           code,
           std::move(taskLauncher))) {}
+InterpreterState::~InterpreterState() = default;

 void InterpreterState::run(Stack& stack) {
   static_cast<InterpreterStateImpl*>(pImpl.get())->run(stack);
@@ -9,6 +9,11 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/frontend/source_range.h>

+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy-dtor")
+#endif
+
 C10_DECLARE_bool(torch_jit_disable_warning_prints);
 C10_DECLARE_bool(torch_jit_enable_rethrow_caught_exception);
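
The restored `C10_CLANG_DIAGNOSTIC_PUSH`/`IGNORE`/`POP` macros wrap raw clang pragmas so the suppression applies only between push and pop (the matching `C10_CLANG_DIAGNOSTIC_POP()` reappears at the end of this header) and, as far as I know, compile away on non-clang compilers. The underlying mechanism, sketched with plain pragmas and a hypothetical type rather than the c10 macros:

#if defined(__clang__)
#pragma clang diagnostic push
#if __has_warning("-Wdeprecated-copy-dtor")
#pragma clang diagnostic ignored "-Wdeprecated-copy-dtor"
#endif
#endif

struct LegacyThing {           // hypothetical example type
  virtual ~LegacyThing() {}    // user-declared dtor makes implicit copies deprecated
};

inline void copyIt(const LegacyThing& a) {
  LegacyThing b = a;           // would warn under -Wdeprecated-copy-dtor without the pragma
  (void)b;
}

#if defined(__clang__)
#pragma clang diagnostic pop
#endif
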
@@ -50,6 +55,7 @@ struct TORCH_API Code {
       const std::shared_ptr<Graph>& graph,
       std::string function_name,
       size_t remaining_bailout_depth = 0);
+  ~Code();

   const std::vector<GraphExecutor*>& grad_executors();
   const std::vector<GraphExecutor*>& diff_graph_op_executors();
@@ -83,6 +89,7 @@ struct TORCH_API MobileCode : Code {
       bool support_default_args_before_out = true,
       bool emit_promoted_ops = true,
       size_t remaining_bailout_depth = 0);
+  ~MobileCode();
 };

 struct InterpreterState {
@@ -92,6 +99,7 @@ struct InterpreterState {
   TORCH_API void run(Stack& stack);
   TORCH_API c10::intrusive_ptr<Future> runAsync(Stack& stack);
   c10::intrusive_ptr<Future> getFuture();
+  TORCH_API ~InterpreterState();

  private:
   InterpreterState(c10::intrusive_ptr<c10::intrusive_ptr_target> pImpl);
@@ -119,19 +127,18 @@ struct Suspend : public std::exception {
 // through (and only through) the forward pass manually, other
 // thread local settings are propagated with ThreadLocalState
 struct InterpreterContinuation {
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   InterpreterContinuation(
-      InterpreterState state_,
+      const InterpreterState& state_,
       Stack stack_,
       int64_t dist_autograd_context_id = 0,
       c10::optional<at::ThreadLocalState> tls_state = c10::nullopt)
-      : state(std::move(state_)),
+      : state(state_),
         stack(std::move(stack_)),
-        tls_state_(std::move(tls_state))
+        tls_state_(std::move(tls_state)) {
 #ifdef USE_DISTRIBUTED
-        ,
-        dist_autograd_context_id_(dist_autograd_context_id)
+    dist_autograd_context_id_ = dist_autograd_context_id;
 #endif
-  {
   }

   void operator()();
@@ -156,3 +163,5 @@ TORCH_API std::vector<StackEntry> currentCallstack();
 TORCH_API std::vector<std::string> currentModuleHierarchy();

 } // namespace torch::jit
+
+C10_CLANG_DIAGNOSTIC_POP()
@@ -1677,6 +1677,8 @@ uint64_t PythonPrint::minVersion() const {
   return pImpl->min_version_;
 }

+PythonPrint::~PythonPrint() = default;
+
 static std::vector<IValue> traverseIValueAndGetObjects(IValue ivalue) {
   std::vector<IValue> result;
   std::vector<IValue> stack;
@@ -42,6 +42,8 @@ struct TORCH_API PythonPrint {
   const SourceRangeRecords& ranges() const;
   uint64_t minVersion() const;

+  ~PythonPrint();
+
  private:
   std::shared_ptr<PythonPrintImpl> pImpl;
 };
@@ -29,6 +29,7 @@ class TORCH_API Reducer {
   template <typename RI>
   Reducer(ExprHandle init, RI interaction)
       : init_(init.node()), interaction_(std::move(interaction)) {}
+  virtual ~Reducer() = default;

   ExprPtr initializer() const {
     return init_;
@@ -67,8 +67,6 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
   // used to rely on a LazyTensor obj with a null Data can now rely on a null
   // LazyTensorPtr instead.
   LazyTensor() = delete;
-  LazyTensor(const LazyTensor&) = default;
-  LazyTensor(LazyTensor&&) noexcept = default;

   ~LazyTensor() override = default;

@@ -49,6 +49,7 @@ struct TORCH_API ExperimentalConfig {
       std::vector<std::string> performance_events = {},
       bool enable_cuda_sync_events = false,
       bool adjust_timestamps = false);
+  ~ExperimentalConfig() = default;
   explicit operator bool() const;

   std::vector<std::string> profiler_metrics;
@@ -87,6 +88,7 @@ struct TORCH_API ProfilerConfig {
       bool with_flops = false,
       bool with_modules = false,
       ExperimentalConfig experimental_config = ExperimentalConfig());
+  ~ProfilerConfig() = default;

   bool disabled() const;
   bool global() const;
@@ -6,9 +6,7 @@

 #include <algorithm>
 #include <memory>
-#include <string_view>
 #include <unordered_map>
-#include <unordered_set>

 namespace torch {

@@ -19,17 +17,12 @@ std::string py_typename(PyObject* object) {
 }

 struct Type {
-  Type() = default;
-  Type(const Type&) = default;
-  Type& operator=(const Type&) = default;
-  Type(Type&&) noexcept = default;
-  Type& operator=(Type&&) noexcept = default;
   virtual bool is_matching(PyObject* object) = 0;
   virtual ~Type() = default;
 };

 struct SimpleType : public Type {
-  SimpleType(std::string_view name) : name(name){};
+  SimpleType(std::string& name) : name(name){};

   bool is_matching(PyObject* object) override {
     return py_typename(object) == name;
@@ -43,10 +36,11 @@ struct MultiType : public Type {
       : types(accepted_types){};

   bool is_matching(PyObject* object) override {
-    return types.find(py_typename(object)) != types.end();
+    auto it = std::find(types.begin(), types.end(), py_typename(object));
+    return it != types.end();
   }

-  std::unordered_set<std::string> types;
+  std::vector<std::string> types;
 };

 struct NullableType : public Type {
@@ -99,8 +93,8 @@ struct SequenceType : public Type {
 };

 struct Argument {
-  Argument(std::string_view name, std::unique_ptr<Type> type)
-      : name(name), type(std::move(type)){};
+  Argument(std::string name, std::unique_ptr<Type> type)
+      : name(std::move(name)), type(std::move(type)){};

   std::string name;
   std::unique_ptr<Type> type;
@@ -124,13 +118,13 @@ struct Option {
   bool has_out;
 };

-std::vector<std::string_view> _splitString(
-    std::string_view s,
-    std::string_view delim) {
-  std::vector<std::string_view> tokens;
+std::vector<std::string> _splitString(
+    const std::string& s,
+    const std::string& delim) {
+  std::vector<std::string> tokens;
   size_t start = 0;
   size_t end = 0;
-  while ((end = s.find(delim, start)) != std::string_view::npos) {
+  while ((end = s.find(delim, start)) != std::string::npos) {
     tokens.push_back(s.substr(start, end - start));
     start = end + delim.length();
   }
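
This file trades `std::string_view` tokens back for owning `std::string` copies. View-based splitting avoids allocations, but every view aliases the input string and must not outlive it, which is the usual reason to fall back to copies. A small standalone sketch of both styles (not the torch helper itself):

#include <string>
#include <string_view>
#include <vector>

// View-based split: cheap, but the views alias `s`, so `s` must stay alive.
std::vector<std::string_view> splitViews(std::string_view s, std::string_view delim) {
  std::vector<std::string_view> tokens;
  size_t start = 0, end = 0;
  while ((end = s.find(delim, start)) != std::string_view::npos) {
    tokens.push_back(s.substr(start, end - start));
    start = end + delim.size();
  }
  tokens.push_back(s.substr(start));  // trailing token
  return tokens;
}

// Copy-based split: each token owns its characters and can outlive the input.
std::vector<std::string> splitCopies(const std::string& s, const std::string& delim) {
  std::vector<std::string> tokens;
  size_t start = 0, end = 0;
  while ((end = s.find(delim, start)) != std::string::npos) {
    tokens.push_back(s.substr(start, end - start));
    start = end + delim.size();
  }
  tokens.push_back(s.substr(start));  // trailing token
  return tokens;
}
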
@@ -138,7 +132,7 @@ std::vector<std::string_view> _splitString(
   return tokens;
 }

-std::unique_ptr<Type> _buildType(std::string_view type_name, bool is_nullable) {
+std::unique_ptr<Type> _buildType(std::string type_name, bool is_nullable) {
   std::unique_ptr<Type> result;
   if (type_name == "float") {
     result = std::make_unique<MultiType>(MultiType{"float", "int", "long"});
@@ -146,16 +140,14 @@ std::unique_ptr<Type> _buildType(std::string_view type_name, bool is_nullable) {
     result = std::make_unique<MultiType>(MultiType{"int", "long"});
   } else if (type_name.find("tuple[") == 0) {
     auto type_list = type_name.substr(6);
-    type_list.remove_suffix(1);
-    auto sub_string_views = _splitString(type_list, ",");
+    type_list.pop_back();
     std::vector<std::unique_ptr<Type>> types;
-    types.reserve(sub_string_views.size());
-    for (auto& type : sub_string_views)
+    for (auto& type : _splitString(type_list, ","))
       types.emplace_back(_buildType(type, false));
     result = std::make_unique<TupleType>(std::move(types));
   } else if (type_name.find("sequence[") == 0) {
     auto subtype = type_name.substr(9);
-    subtype.remove_suffix(1);
+    subtype.pop_back();
     result = std::make_unique<SequenceType>(_buildType(subtype, false));
   } else {
     result = std::make_unique<SimpleType>(type_name);
@@ -202,7 +194,7 @@ std::pair<Option, std::string> _parseOption(
     if (arg[type_start_idx] == '[') {
       is_nullable = true;
       type_start_idx++;
-      arg.remove_suffix(std::string(" or None]").length());
+      arg.erase(arg.length() - std::string(" or None]").length());
     }

     auto type_end_idx = arg.find_last_of(' ');
@@ -211,15 +203,17 @@ std::pair<Option, std::string> _parseOption(
     // "type ... name" => "type ... name"
     //  ^              ^
     auto dots_idx = arg.find("...");
-    if (dots_idx != std::string_view::npos)
+    if (dots_idx != std::string::npos)
       type_end_idx -= 4;

-    auto type_name = arg.substr(type_start_idx, type_end_idx - type_start_idx);
-    auto name = arg.substr(name_start_idx);
+    std::string type_name =
+        arg.substr(type_start_idx, type_end_idx - type_start_idx);
+    std::string name = arg.substr(name_start_idx);

     arguments.emplace_back(name, _buildType(type_name, is_nullable));
   }

-  bool is_variadic = option_str.find("...") != std::string_view::npos;
+  bool is_variadic = option_str.find("...") != std::string::npos;
   return std::pair<Option, std::string>(
       Option(std::move(arguments), is_variadic, has_out),
       std::move(printable_option));