Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-06 00:20:18 +01:00
Fix extra semicolon warning (#148291)
Fixes #ISSUE_NUMBER
Pull Request resolved: https://github.com/pytorch/pytorch/pull/148291
Approved by: https://github.com/Skylion007
parent 1c544a9ddd
commit 09291817b2
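Context for the change, as a minimal sketch (DEFINE_FLAG below is a hypothetical stand-in, not a c10 macro): macros such as C10_DEFINE_bool, C10_DEFINE_REGISTRY, and REGISTER_ALLOCATOR expand to a complete definition, so writing a semicolon after the closing parenthesis leaves an empty declaration at namespace scope, which clang flags under -Wextra-semi. The diff below simply drops those redundant semicolons at each call site.

// Hypothetical stand-in for a definition-style macro like C10_DEFINE_bool:
// the expansion already ends with a complete definition and its own ';'.
#define DEFINE_FLAG(name, default_value) \
  bool FLAGS_##name = default_value;

DEFINE_FLAG(demo_flag, false)    // OK: nothing is left over after expansion
// DEFINE_FLAG(other_flag, true);  // the extra ';' would be an empty
//                                 // declaration -> -Wextra-semi warning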
@@ -11,7 +11,7 @@
 C10_DEFINE_bool(
     caffe2_report_cpu_memory_usage,
     false,
-    "If set, print out detailed memory usage");
+    "If set, print out detailed memory usage")

 namespace c10 {

@@ -196,7 +196,7 @@ at::Allocator* GetDefaultCPUAllocator() {
   return &g_cpu_alloc;
 }

-REGISTER_ALLOCATOR(DeviceType::CPU, &g_cpu_alloc);
+REGISTER_ALLOCATOR(DeviceType::CPU, &g_cpu_alloc)

 #endif /* C10_Mobile */

@@ -196,72 +196,72 @@ struct Convert<SymFloat> {
 #define DEFINE_SYMINT_OP_INTONLY(scalar_t, RetTy) \
   RetTy operator%(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) % RetTy(b);        \
-  };                                              \
+  }                                               \
   RetTy operator%(scalar_t a, const SymInt& b) {  \
     return RetTy(a) % Convert<RetTy>()(b);        \
-  };
+  }

 #define DEFINE_SYMINT_OP(scalar_t, RetTy)         \
   RetTy operator+(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) + RetTy(b);        \
-  };                                              \
+  }                                               \
   RetTy operator-(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) - RetTy(b);        \
-  };                                              \
+  }                                               \
   RetTy operator*(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) * RetTy(b);        \
-  };                                              \
+  }                                               \
   RetTy operator/(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) / RetTy(b);        \
-  };                                              \
+  }                                               \
   RetTy operator+(scalar_t a, const SymInt& b) {  \
     return RetTy(a) + Convert<RetTy>()(b);        \
-  };                                              \
+  }                                               \
   RetTy operator-(scalar_t a, const SymInt& b) {  \
     return RetTy(a) - Convert<RetTy>()(b);        \
-  };                                              \
+  }                                               \
   RetTy operator*(scalar_t a, const SymInt& b) {  \
     return RetTy(a) * Convert<RetTy>()(b);        \
-  };                                              \
+  }                                               \
   RetTy operator/(scalar_t a, const SymInt& b) {  \
     return RetTy(a) / Convert<RetTy>()(b);        \
-  };                                              \
+  }                                               \
   bool operator==(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) == RetTy(b);       \
-  };                                              \
+  }                                               \
   bool operator!=(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) != RetTy(b);       \
-  };                                              \
+  }                                               \
   bool operator<(const SymInt& a, scalar_t b) {   \
     return Convert<RetTy>()(a) < RetTy(b);        \
-  };                                              \
+  }                                               \
   bool operator<=(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) <= RetTy(b);       \
-  };                                              \
+  }                                               \
   bool operator>(const SymInt& a, scalar_t b) {   \
     return Convert<RetTy>()(a) > RetTy(b);        \
-  };                                              \
+  }                                               \
   bool operator>=(const SymInt& a, scalar_t b) {  \
     return Convert<RetTy>()(a) >= RetTy(b);       \
-  };                                              \
+  }                                               \
   bool operator==(scalar_t a, const SymInt& b) {  \
     return RetTy(a) == Convert<RetTy>()(b);       \
-  };                                              \
+  }                                               \
   bool operator!=(scalar_t a, const SymInt& b) {  \
     return RetTy(a) != Convert<RetTy>()(b);       \
-  };                                              \
+  }                                               \
   bool operator<(scalar_t a, const SymInt& b) {   \
     return RetTy(a) < Convert<RetTy>()(b);        \
-  };                                              \
+  }                                               \
   bool operator<=(scalar_t a, const SymInt& b) {  \
     return RetTy(a) <= Convert<RetTy>()(b);       \
-  };                                              \
+  }                                               \
   bool operator>(scalar_t a, const SymInt& b) {   \
     return RetTy(a) > Convert<RetTy>()(b);        \
-  };                                              \
+  }                                               \
   bool operator>=(scalar_t a, const SymInt& b) {  \
     return RetTy(a) >= Convert<RetTy>()(b);       \
-  };
+  }

 DEFINE_SYMINT_OP_INTONLY(int64_t, SymInt)
 DEFINE_SYMINT_OP_INTONLY(int32_t, SymInt)

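Why the hunk above matters more than the one-liners, sketched as an aside (MAKE_GETTER is illustrative, not PyTorch code): when the stray ';' lives inside a macro body, every expansion of the macro re-emits it at namespace scope, so one fix in the macro silences the warning at every DEFINE_SYMINT_OP call site.

// Illustrative repro: the ';' after '}' inside the macro body is re-emitted
// by every expansion, and a compiler with -Wextra-semi (clang, and similarly
// GCC) may warn once per use.
#define MAKE_GETTER(T) \
  T get_##T(T v) { return v; };

MAKE_GETTER(int)    // expansion ends "...};" -> extra ';' warning here
MAKE_GETTER(float)  // ...and again here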
@@ -17,13 +17,13 @@
 C10_DEFINE_bool(
     caffe2_keep_on_shrink,
     true,
-    "If set, keeps memory when a tensor is shrinking its size.");
+    "If set, keeps memory when a tensor is shrinking its size.")

 C10_DEFINE_int64(
     caffe2_max_keep_on_shrink_memory,
     LLONG_MAX,
     "The maximum memory in bytes to keep on shrink, if the difference between "
-    "tensor sizes is bigger than this then tensor will be reset.");
+    "tensor sizes is bigger than this then tensor will be reset.")

 namespace c10 {

@@ -128,7 +128,7 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {

   void reset_backward_hooks(const TensorImpl* self) const override {
     PANIC(reset_backward_hooks);
-  };
+  }
 };

 // Construct this in Global scope instead of within `disarm`

@@ -22,12 +22,12 @@
 C10_DEFINE_bool(
     caffe2_cpu_allocator_do_zero_fill,
     false,
-    "If set, do memory zerofilling when allocating on CPU");
+    "If set, do memory zerofilling when allocating on CPU")

 C10_DEFINE_bool(
     caffe2_cpu_allocator_do_junk_fill,
     false,
-    "If set, fill memory with deterministic junk when allocating on CPU");
+    "If set, fill memory with deterministic junk when allocating on CPU")

 namespace c10 {

@@ -170,5 +170,5 @@ C10_DEFINE_SHARED_REGISTRY(
     TaskThreadPoolBase,
     int,
     int,
-    bool);
+    bool)
 } // namespace c10

@@ -41,7 +41,7 @@ TORCH_SDT_DEFINE_SEMAPHORE(free)

 namespace c10 {

-C10_DEFINE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback);
+C10_DEFINE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback)

 namespace cuda::CUDACachingAllocator {

@@ -855,7 +855,7 @@ BlockState::BlockState(Block* block)
   TORCH_CHECK(
       block->event_count == 0,
       "Events should have synchronized when checkpointing block");
-};
+}

 SegmentState::SegmentState(Block* head) {
   TORCH_INTERNAL_ASSERT(head->prev == nullptr && head->pool != nullptr);

@@ -2,6 +2,6 @@

 namespace c10::cuda::impl {

-C10_REGISTER_GUARD_IMPL(CUDA, CUDAGuardImpl);
+C10_REGISTER_GUARD_IMPL(CUDA, CUDAGuardImpl)

 } // namespace c10::cuda::impl

@@ -20,7 +20,7 @@ C10_DEFINE_bool(
     caffe2_use_fatal_for_enforce,
     false,
     "If set true, when CAFFE_ENFORCE is not met, abort instead "
-    "of throwing an exception.");
+    "of throwing an exception.")

 namespace c10 {

@@ -273,9 +273,9 @@ DECLARE_bool(logtostderr);
 // This backward compatibility flags are in order to deal with cases where
 // Caffe2 are not built with glog, but some init flags still pass in these
 // flags. They may go away in the future.
-C10_DEFINE_int32(minloglevel, 0, "Equivalent to glog minloglevel");
-C10_DEFINE_int32(v, 0, "Equivalent to glog verbose");
-C10_DEFINE_bool(logtostderr, false, "Equivalent to glog logtostderr");
+C10_DEFINE_int32(minloglevel, 0, "Equivalent to glog minloglevel")
+C10_DEFINE_int32(v, 0, "Equivalent to glog verbose")
+C10_DEFINE_bool(logtostderr, false, "Equivalent to glog logtostderr")
 #endif // !defined(c10_USE_GLOG)

 #ifdef C10_USE_GLOG

@@ -375,7 +375,7 @@ void ShowLogInfoToStderr() {
 C10_DEFINE_int(
     caffe2_log_level,
     c10::GLOG_WARNING,
-    "The minimum log level that caffe2 will output.");
+    "The minimum log level that caffe2 will output.")

 namespace c10 {

@@ -12,7 +12,7 @@ namespace c10 {

 using std::string;

-C10_DEFINE_REGISTRY(C10FlagsRegistry, C10FlagParser, const string&);
+C10_DEFINE_REGISTRY(C10FlagsRegistry, C10FlagParser, const string&)

 namespace {
 static bool gCommandLineFlagsParsed = false;

@@ -1,7 +1,7 @@
 #include <c10/util/Exception.h>
 #include <c10/util/numa.h>

-C10_DEFINE_bool(caffe2_cpu_numa_enabled, false, "Use NUMA whenever possible.");
+C10_DEFINE_bool(caffe2_cpu_numa_enabled, false, "Use NUMA whenever possible.")

 #if defined(__linux__) && defined(C10_USE_NUMA) && !defined(C10_MOBILE)
 #include <numa.h>

@@ -81,7 +81,7 @@ CAFFE_DEFINE_KNOWN_TYPE(int*, int_ptr)

 CAFFE_DEFINE_KNOWN_TYPE(
     detail::_guard_long_unique<long>,
-    detail_guard_long_unique_long);
+    detail_guard_long_unique_long)
 CAFFE_DEFINE_KNOWN_TYPE(
     detail::_guard_long_unique<std::vector<long>>,
     detail_guard_long_unique_std_vector_long)
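To reproduce the class of warning this commit fixes, a small standalone file is enough (the file name and compile line are illustrative assumptions, not part of the commit):

// repro.cpp -- e.g. clang++ -std=c++17 -Wextra-semi -c repro.cpp
struct S {
  void ok() {}
  void noisy() {};  // clang: extra ';' after member function definition
};

void free_fn() {};  // an extra ';' at namespace scope is also reported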