Use Wextra-semi (#140236)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140236
Approved by: https://github.com/ezyang
parent fb7148d05d
commit 40fb738197
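For context: -Wextra-semi is a Clang (and GCC 8+) warning that flags redundant semicolons, chiefly a ';' left after a function body or after a macro invocation whose expansion already ends in ';'. A minimal sketch of the two patterns this commit cleans up (file name and identifiers are illustrative, not taken from the diff):

// extra_semi_demo.cpp
// Try: clang++ -std=c++17 -Wextra-semi -c extra_semi_demo.cpp

struct Widget {
  Widget() {};      // flagged: redundant ';' after the constructor body
  void reset() {};  // flagged: redundant ';' after a member function body
};
// Fix: drop the trailing semicolons, i.e. "Widget() {}" and "void reset() {}".

// A macro whose expansion is a complete declaration has the same problem:
// the expansion already supplies the terminating ';', so a semicolon at the
// use site becomes a stray empty declaration.
#define DEFINE_COUNTER(name) int counter_##name = 0;
DEFINE_COUNTER(hits)       // correct: the macro expansion provides the ';'
// DEFINE_COUNTER(misses);  // would expand to "int counter_misses = 0;;"

Most hunks below are mechanical applications of these two fixes, plus the .clang-format and CMake plumbing that enforces the warning.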
@@ -106,10 +106,11 @@ StatementMacros:
   - C10_DEFINE_int32
   - C10_DEFINE_int64
   - C10_DEFINE_string
+  - DEFINE_BINARY
   - PyObject_HEAD
   - PyObject_VAR_HEAD
   - PyException_HEAD
-  - DEFINE_BINARY
+  - TORCH_DECLARE_bool
 TabWidth: 8
 UseTab: Never

@@ -20,7 +20,7 @@
 //
 // In native/MyKernel.h:
 //   using fn_type = void(*)(const Tensor& x);
-//   DECLARE_DISPATCH(fn_type, stub);
+//   DECLARE_DISPATCH(fn_type, stub)
 //
 // In native/MyKernel.cpp
 //   DEFINE_DISPATCH(stub);

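The DECLARE_DISPATCH change above is a convention choice for statement-like macros: either the expansion ends with ';' and call sites omit it, or the expansion deliberately omits it so call sites must supply one. A hedged sketch of both styles (hypothetical macros, not PyTorch's actual definitions):

#include <iostream>

// Style A: the expansion is a complete declaration; use sites write no ';'.
// This is the style the hunk above adopts for DECLARE_DISPATCH.
#define DECLARE_FLAG_A(name) extern bool name;

// Style B: the expansion ends without ';' so every use site must add one,
// which keeps the macro call looking like an ordinary statement.
#define DECLARE_FLAG_B(name) extern bool name

DECLARE_FLAG_A(flag_a)   // no trailing ';': the macro already provides it
DECLARE_FLAG_B(flag_b);  // trailing ';' required here

bool flag_a = true;
bool flag_b = false;

int main() {
  std::cout << flag_a << ' ' << flag_b << '\n';
  return 0;
}

Calling a Style A macro with a trailing ';' produces the double semicolons that -Wextra-semi reports.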
@@ -476,8 +476,8 @@ void bf16_gemv_trans(
 #if !defined(C10_MOBILE)
 REGISTER_DISPATCH(fp16_dot_with_fp32_arith_stub, &fp16_dot_with_fp32_arith)
 REGISTER_DISPATCH(fp16_gemv_trans_stub, &fp16_gemv_trans)
-REGISTER_DISPATCH(bf16_dot_with_fp32_arith_stub, &bf16_dot_with_fp32_arith);
-REGISTER_DISPATCH(bf16_gemv_trans_stub, &bf16_gemv_trans);
+REGISTER_DISPATCH(bf16_dot_with_fp32_arith_stub, &bf16_dot_with_fp32_arith)
+REGISTER_DISPATCH(bf16_gemv_trans_stub, &bf16_gemv_trans)
 #endif //!defined(C10_MOBILE)

 } // namespace at::native

@@ -387,9 +387,8 @@ function(torch_compile_options libname)
 list(APPEND private_compile_options -Wunused-but-set-variable)
 endif()
 if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
-list(APPEND private_compile_options -Wunused-private-field)
-endif()
-if(NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi)
+else()
 list(APPEND private_compile_options
 # Considered to be flaky. See the discussion at
 # https://github.com/pytorch/pytorch/pull/9608

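Note the rollout strategy in the CMake hunk above: -Wextra-semi is enabled only for Clang builds, and it is paired with -Wno-error=extra-semi, so even in -Werror configurations new violations surface as warnings rather than build breaks.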
@@ -70,15 +70,15 @@ SIGNAL_HANDLER(
 SIGBUS,
 handler_SIGBUS,
 "ERROR: Unexpected bus error encountered in worker. "
-"This might be caused by insufficient shared memory (shm).\n");
+"This might be caused by insufficient shared memory (shm).\n")
 SIGNAL_HANDLER(
 SIGSEGV,
 handler_SIGSEGV,
-"ERROR: Unexpected segmentation fault encountered in worker.\n");
+"ERROR: Unexpected segmentation fault encountered in worker.\n")
 SIGNAL_HANDLER(
 SIGFPE,
 handler_SIGFPE,
-"ERROR: Unexpected floating-point exception encountered in worker.\n");
+"ERROR: Unexpected floating-point exception encountered in worker.\n")

 // When an error happened in DataLoader methods and Python starts to exit, the
 // error trace will keep the loader alive, and Python may kill the children

@@ -339,7 +339,7 @@ struct noop_gil_scoped_release {
 // user-defined constructor (i.e. not defaulted) to avoid
 // unused-variable warnings at usage sites of this class
 // NOLINTNEXTLINE(modernize-use-equals-default)
-noop_gil_scoped_release(){};
+noop_gil_scoped_release() {}
 };

 template <bool release_gil>

@@ -273,14 +273,14 @@ void ConcretePyInterpreterVTable::decref(PyObject* pyobj, bool has_pyobj_slot)
 }
 }
 Py_DECREF(pyobj);
-};
+}

 void ConcretePyInterpreterVTable::incref(PyObject* pyobj) const {
 if (!Py_IsInitialized())
 return;
 pybind11::gil_scoped_acquire gil;
 Py_INCREF(pyobj);
-};
+}

 bool isPythonTensor(const at::Tensor& tensor) {
 return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Python);

@@ -10,7 +10,7 @@
 #define THPStorageStr "torch.UntypedStorage"

 struct THPStorage {
-PyObject_HEAD;
+PyObject_HEAD
 c10::MaybeOwned<c10::Storage> cdata;
 bool is_hermetic;
 };

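The PyObject_HEAD changes in this and later hunks follow directly from CPython's definition of the macro, which already ends in a semicolon (it expands to "PyObject ob_base;"), so writing "PyObject_HEAD;" leaves an extra empty declaration inside the struct. A minimal sketch using the real CPython macro (MyObject is illustrative):

#include <Python.h>

typedef struct {
  PyObject_HEAD     // correct: no trailing ';', the expansion supplies it
  int payload;      // object-specific fields follow the common header
} MyObject;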
@@ -5249,7 +5249,7 @@ static Tensor apply_simple_transformation(
 return condition_with_I ? K - transformation : -transformation;
 }
 }
-};
+}

 std::tuple<Tensor, Tensor> householder_product_backward(
 const Tensor& grad,

@@ -77,7 +77,7 @@ PyCodeObject* getCode<CallType::PyModuleCall>() {
 return (PyCodeObject*)res;
 }();
 return module_call_code;
-};
+}

 template <>
 PyCodeObject* getCode<CallType::PyOptimizerCall>() {

@@ -92,7 +92,7 @@ PyCodeObject* getCode<CallType::PyOptimizerCall>() {
 return (PyCodeObject*)res;
 }();
 return optimizer_step_code;
-};
+}

 } // namespace
 } // namespace torch::profiler::impl

@@ -548,7 +548,7 @@ struct TraceKeyCacheState {
 // `PyEval_SetProfile`.
 struct ThreadLocalResults;
 struct TraceContext {
-PyObject_HEAD;
+PyObject_HEAD
 ThreadLocalResults* thread_local_results_;
 };

@@ -795,7 +795,7 @@ PythonTracer::PythonTracer(torch::profiler::impl::RecordQueue* queue)
 // cannot be round tripped via `sys.settrace(sys.gettrace())`
 PyEval_SetProfile(PythonTracer::pyProfileFn, (PyObject*)ctx);
 }
-};
+}

 void PythonTracer::stop() {
 gil_and_restore_thread gil;

@@ -15,7 +15,7 @@ namespace py = pybind11;

 // Python object that backs torch.autograd.Variable
 struct THPVariable {
-PyObject_HEAD;
+PyObject_HEAD
 // Payload
 c10::MaybeOwned<at::Tensor> cdata;
 // Hooks to be run on backwards pass (corresponds to Python attr

@@ -5,7 +5,7 @@
 namespace torch::distributed::autograd {

 CleanupAutogradContextReq::CleanupAutogradContextReq(int64_t context_id)
-: context_id_(context_id){};
+: context_id_(context_id) {}

 int64_t CleanupAutogradContextReq::getContextId() {
 return context_id_;

@@ -65,7 +65,7 @@ class DetectorMap {
 cached_;
 };

-}; // namespace
+} // namespace

 namespace c10d {

@@ -139,7 +139,7 @@ class IntrusivePtrNoGilDestructor {

 } // anonymous namespace

-PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor<T>, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor<T>, true)

 namespace torch::distributed::c10d {

@@ -43,7 +43,7 @@ C10_DEFINE_TYPED_REGISTRY( // NOLINT
 c10::DeviceType,
 Timer,
 std::unique_ptr,
-c10::Device);
+c10::Device)

 namespace {

@@ -67,7 +67,7 @@ class CpuTimer : public Timer {
 }
 };

-C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer);
+C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer)

 std::vector<at::Tensor> extractTensors(const c10::IValue& result) {
 if (result.isPyObject()) {

@@ -3,5 +3,5 @@
 namespace torch::distributed::rpc {
 C10_DEFINE_REGISTRY(
 RpcMetricsHandlerRegistry,
-torch::distributed::rpc::RpcMetricsHandler);
+torch::distributed::rpc::RpcMetricsHandler)
 } // namespace torch::distributed::rpc

@@ -229,7 +229,7 @@ namespace {
 typedef std::vector<TensorCheck> ChecksList;

 typedef struct {
-PyObject_HEAD;
+PyObject_HEAD
 ChecksList* checks;
 } TensorGuards;

@@ -510,7 +510,7 @@ static PyTypeObject TensorGuardsType = { PyVarObject_HEAD_INIT(nullptr, 0)
 // merged.
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct GlobalStateGuard {
-PyObject_HEAD;
+PyObject_HEAD

 inline void init() {
 auto& ctx = at::globalContext();

@@ -15,7 +15,7 @@
 static struct PyModuleDef _module =
 {PyModuleDef_HEAD_INIT, "torch._C._dynamo", "", -1, nullptr};

-PYBIND11_MAKE_OPAQUE(std::vector<uint8_t>);
+PYBIND11_MAKE_OPAQUE(std::vector<uint8_t>)

 namespace torch::dynamo {

@@ -13,6 +13,7 @@
 #include <torch/csrc/jit/passes/autocast.h>
 #endif

+// clang-format off
 C10_DEFINE_bool(
 torch_jit_do_not_store_optimized_graph,
 false,

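Several hunks from here on also add "// clang-format off" in front of multi-line C10_DEFINE_bool(...) invocations. The commit message does not explain this, but a plausible reading is that once the trailing semicolon is dropped, clang-format no longer sees the macro call as a terminated statement (unless the macro is listed in StatementMacros, as the first hunk does for other macros), so these regions are fenced off to keep the formatter from re-flowing the code around them.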
@@ -61,7 +61,7 @@ std::unique_ptr<Stack> MTensorArgumentCreator(Node* n) {
 }
 }
 return stack;
-};
+}

 bool MTensorNodeArgValid(Value* value) {
 auto tensor_type = value->type()->cast<TensorType>();

@@ -79,7 +79,7 @@ namespace {

 class NodeNameGenerator {
 public:
-NodeNameGenerator(std::shared_ptr<Graph> g) : graph_(std::move(g)){};
+NodeNameGenerator(std::shared_ptr<Graph> g) : graph_(std::move(g)) {}
 virtual ~NodeNameGenerator() = 0;
 void PopulateNodeNames();

@@ -105,7 +105,7 @@ NodeNameGenerator::~NodeNameGenerator() = default;
 class ScopedNodeNameGenerator : public NodeNameGenerator {
 public:
 ScopedNodeNameGenerator(std::shared_ptr<Graph> g)
-: NodeNameGenerator(std::move(g)){};
+: NodeNameGenerator(std::move(g)) {}

 protected:
 void CreateNodeName(Node* n) override;

@@ -205,5 +205,5 @@ bool operator==(
 const CanonicalizedSymbolicShape& a,
 const CanonicalizedSymbolicShape& b) {
 return a.values_ == b.values_;
-};
+}
 } // namespace torch::jit

@@ -154,7 +154,7 @@ static std::vector<StrideInput> summarizeInputStrides(const TensorType& tt) {
 summarizeStrideDim(sizes, strides, dim, stride_inputs, 0));
 }
 return stride_inputs;
-};
+}

 // Todo: incorporate in codegen
 static StrideInput summarizeOutputStrides(const TensorType& tt) {

@@ -26,6 +26,7 @@

 #include <utility>

+// clang-format off
 C10_DEFINE_bool(
 torch_jit_disable_cat,
 false,

@@ -65,7 +65,7 @@ class unwrapping_shared_ptr {

 } // namespace torch::jit

-PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true)

 namespace pybind11::detail {

@@ -98,12 +98,12 @@ class ScriptDict final {
 // not exist.
 at::IValue getItem(const at::IValue& key) {
 return dict_.at(key);
-};
+}

 // Set the value for the given key.
 void setItem(const at::IValue& key, const at::IValue& value) {
 dict_.insert_or_assign(key, value);
-};
+}

 // Check whether the dictionary contains the given key.
 bool contains(const at::IValue& key) {

@@ -92,7 +92,7 @@ class ScriptList final {
 at::IValue getItem(diff_type idx) {
 idx = wrap_index(idx);
 return list_.get(idx);
-};
+}

 // Set the value corresponding to the given index.
 void setItem(diff_type idx, const at::IValue& value) {

@@ -127,7 +127,7 @@ struct VISIBILITY_HIDDEN ConstantParameterList : public SugaredValue {

 struct VISIBILITY_HIDDEN ModuleDictMethod : public SugaredValue {
 explicit ModuleDictMethod(SugaredValuePtr iterable, std::string name)
-: iterable_(std::move(iterable)), name_(std::move(name)){};
+: iterable_(std::move(iterable)), name_(std::move(name)) {}

 std::string kind() const override {
 return name_;

@@ -286,7 +286,7 @@ struct VISIBILITY_HIDDEN SugaredDict : public SugaredValue {

 SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override {
 return keys_;
-};
+}

 std::shared_ptr<ModuleValue> self_;
 std::shared_ptr<SugaredTupleValue> keys_;

@@ -66,7 +66,7 @@ void ArgumentSpecCreator::scan(
 } else {
 instructions_.emplace_back(SKIP);
 }
-};
+}

 // this is a coarse-grained guarantee that the slots of a class will not be
 // modified by the function. It works fine for things that used be read-only

@@ -53,6 +53,7 @@
 #include <utility>
 #include <vector>

+// clang-format off
 C10_DEFINE_bool(
 torch_jit_execution_plan_reuse_code_graph,
 false,

@@ -46,6 +46,7 @@ using torch::distributed::autograd::DistAutogradContainer;
 #include <utility>
 #include <vector>

+// clang-format off
 C10_DEFINE_bool(
 torch_jit_enable_rethrow_caught_exception,
 false,

@@ -38,6 +38,7 @@
 #include <mutex>
 #include <optional>

+// clang-format off
 C10_DEFINE_bool(
 torch_jit_enable_new_executor,
 true,

@@ -47,6 +47,7 @@
 #endif

 // used in test only
+// clang-format off
 C10_DEFINE_bool(
 static_runtime_disable_debug_memory_overlap_check,
 false,

@@ -40,6 +40,7 @@

 #include <ATen/CompositeExplicitAutogradFunctions.h>

+// clang-format off
 C10_DEFINE_bool(
 static_runtime_enable_fast_math,
 true,

@@ -9,6 +9,7 @@
 #include <torch/csrc/jit/runtime/graph_iterator.h>
 #include <torch/csrc/jit/runtime/static/ops.h>

+// clang-format off
 C10_DEFINE_bool(
 enable_clip_ranges_gather_fusions,
 true,

@@ -85,7 +85,7 @@ std::ostream& operator<<(std::ostream& out, const Check& c) {
 }
 out << ": " << c.search_str_;
 return out;
-};
+}

 namespace {

@@ -1,7 +1,7 @@
 #include <c10/util/env.h>
 #include <torch/csrc/lazy/core/config.h>

-C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging");
+C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging")

 C10_DEFINE_bool(
 torch_lazy_param_aliasing,

@@ -7,6 +7,7 @@

 // Enables caching on for dynamic shapes (aka disable hash on shapes)
 // NOLINTNEXTLINE(misc-use-internal-linkage)
+// clang-format off
 C10_DEFINE_bool(
 ltc_enable_dynamic_shapes,
 false,

@@ -9,7 +9,7 @@
 C10_DEFINE_bool(
 ltc_enable_symbolic_shapes,
 false,
-"Enables calculation of if dims are symbolic");
+"Enables calculation of if dims are symbolic")

 namespace torch::lazy {

@@ -5,7 +5,7 @@
 C10_DEFINE_bool(
 torch_lazy_ts_tensor_update_sync,
 true,
-"Use synchronous copy inside _copy_from op");
+"Use synchronous copy inside _copy_from op")

 // TODO(whc) we need to hook up these flags in a more useful way
 // possibly also keep LTC_TS_CUDA env working?

@@ -13,4 +13,4 @@ C10_DEFINE_bool(
 C10_DEFINE_bool(
 torch_lazy_ts_cuda,
 false,
-"Use cuda device for torchscript backend (instead of CPU)");
+"Use cuda device for torchscript backend (instead of CPU)")

@@ -12,12 +12,12 @@ using torch::profiler::impl::TensorID;
 template <> \
 struct type_caster<T> : public strong_pointer_type_caster<T> {};

-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls);
-STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf);
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls)
+STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf)
 #undef STRONG_POINTER_TYPE_CASTER

 template <>

@@ -27,7 +27,7 @@ struct Type {
 };

 struct SimpleType : public Type {
-SimpleType(std::string& name) : name(name){};
+SimpleType(std::string& name) : name(name) {}

 bool is_matching(PyObject* object) override {
 return py_typename(object) == name;

@@ -38,7 +38,7 @@ struct SimpleType : public Type {

 struct MultiType : public Type {
 MultiType(std::initializer_list<std::string> accepted_types)
-: types(accepted_types){};
+: types(accepted_types) {}

 bool is_matching(PyObject* object) override {
 auto it = std::find(types.begin(), types.end(), py_typename(object));

@@ -49,7 +49,7 @@ struct MultiType : public Type {
 };

 struct NullableType : public Type {
-NullableType(std::unique_ptr<Type> type) : type(std::move(type)){};
+NullableType(std::unique_ptr<Type> type) : type(std::move(type)) {}

 bool is_matching(PyObject* object) override {
 return object == Py_None || type->is_matching(object);

@@ -60,7 +60,7 @@ struct NullableType : public Type {

 struct TupleType : public Type {
 TupleType(std::vector<std::unique_ptr<Type>> types)
-: types(std::move(types)){};
+: types(std::move(types)) {}

 bool is_matching(PyObject* object) override {
 if (!PyTuple_Check(object))

@@ -79,7 +79,7 @@ struct TupleType : public Type {
 };

 struct SequenceType : public Type {
-SequenceType(std::unique_ptr<Type> type) : type(std::move(type)){};
+SequenceType(std::unique_ptr<Type> type) : type(std::move(type)) {}

 bool is_matching(PyObject* object) override {
 if (!PySequence_Check(object))

@@ -99,7 +99,7 @@ struct SequenceType : public Type {

 struct Argument {
 Argument(std::string name, std::unique_ptr<Type> type)
-: name(std::move(name)), type(std::move(type)){};
+: name(std::move(name)), type(std::move(type)) {}

 std::string name;
 std::unique_ptr<Type> type;

@@ -109,9 +109,9 @@ struct Option {
 Option(std::vector<Argument> arguments, bool is_variadic, bool has_out)
 : arguments(std::move(arguments)),
 is_variadic(is_variadic),
-has_out(has_out){};
+has_out(has_out) {}
 Option(bool is_variadic, bool has_out)
-: arguments(), is_variadic(is_variadic), has_out(has_out){};
+: arguments(), is_variadic(is_variadic), has_out(has_out) {}
 Option(const Option&) = delete;
 Option(Option&& other) noexcept = default;
 Option& operator=(const Option&) = delete;

@@ -7,15 +7,15 @@
 template <class T>
 class TORCH_PYTHON_API THPPointer {
 public:
-THPPointer() : ptr(nullptr){};
-explicit THPPointer(T* ptr) noexcept : ptr(ptr){};
+THPPointer() : ptr(nullptr) {}
+explicit THPPointer(T* ptr) noexcept : ptr(ptr) {}
 THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}
 THPPointer(const THPPointer& p) = delete;
 THPPointer& operator=(const THPPointer&) = delete;

 ~THPPointer() {
 free();
-};
+}
 T* get() {
 return ptr;
 }

@@ -24,10 +24,10 @@ namespace py = pybind11;
 // This makes intrusive_ptr to be available as a custom pybind11 holder type,
 // see
 // https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers
-PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true)

-PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>);
-PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true);
+PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>)
+PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true)

 namespace pybind11::detail {

@@ -35,7 +35,7 @@ class PythonSymNodeImpl : public c10::SymNodeImpl {
 PythonSymNodeImpl(py::object pyobj) : c10::SymNodeImpl() {
 pyobj_ = std::make_shared<c10::SafePyObject>(
 pyobj.release().ptr(), getPyInterpreter());
-};
+}

 c10::SymNode wrap_int(int64_t num) override {
 py::gil_scoped_acquire acquire;