Fix sign-compare in caffe2

Prerequisite change for enabling `-Werror=sign-compare` across the PyTorch repo.
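
For context, here is a minimal sketch of the warning class this change clears (the file name `sign_compare_example.cc` and the function `enough_kept` are illustrative only, not taken from the diff): comparing a signed `int` bound against an unsigned `size()` result triggers `-Wsign-compare`, which becomes a hard error once `-Werror=sign-compare` is turned on; the hunks below silence it by checking the sign and then casting the signed side to the unsigned type.

    // sign_compare_example.cc -- illustrative sketch, not part of this commit.
    // Build with, e.g.: g++ -std=c++14 -Werror=sign-compare sign_compare_example.cc
    #include <cstddef>
    #include <vector>

    bool enough_kept(const std::vector<int>& keep, int topN) {
      // With -Werror=sign-compare the commented-out line below fails to compile:
      //   "comparison of integer expressions of different signedness
      //    ('std::vector<int>::size_type' and 'int')"
      // return topN >= 0 && keep.size() >= topN;

      // The pattern used in the hunks below: reject negative bounds first,
      // then cast the signed value to the unsigned type before comparing.
      return topN >= 0 && keep.size() >= static_cast<std::size_t>(topN);
    }

    int main() {
      std::vector<int> keep{1, 2, 3};
      return enough_kept(keep, 2) ? 0 : 1;
    }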

Pull Request resolved: https://github.com/pytorch/pytorch/pull/75082

Approved by: https://github.com/ngimel
Nikita Shulga 2022-04-01 19:29:35 -07:00 committed by PyTorch MergeBot
parent c593c220ff
commit 6d85e7dafa
6 changed files with 16 additions and 19 deletions

@@ -60,8 +60,7 @@ class C10_EXPORT QTensor {
   void Resize(at::ArrayRef<int> dim_source) {
     if (dims_ != dim_source) {
       const auto source_size = c10::multiply_integers(dim_source);
-      // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
-      if ((source_size * (precision_ + signed_)) > capacity_) {
+      if (static_cast<size_t>(source_size * (precision_ + signed_)) > capacity_) {
         data_ptr_.clear();
         capacity_ = 0;
       }

@@ -50,8 +50,7 @@ std::vector<int> nms_cpu_upright(
   std::vector<int> keep;
   while (order.size() > 0) {
     // exit if already enough proposals
-    // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
-    if (topN >= 0 && keep.size() >= topN) {
+    if (topN >= 0 && keep.size() >= static_cast<size_t>(topN)) {
       break;
     }
@@ -127,7 +126,7 @@ std::vector<int> soft_nms_cpu_upright(
   EArrXi pending = AsEArrXt(indices);
   while (pending.size() > 0) {
     // Exit if already enough proposals
-    if (topN >= 0 && keep.size() >= topN) {
+    if (topN >= 0 && keep.size() >= static_cast<unsigned>(topN)) {
       break;
     }
@@ -560,8 +559,7 @@ std::vector<int> nms_cpu_rotated(
   std::vector<int> keep;
   while (order.size() > 0) {
     // exit if already enough proposals
-    // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
-    if (topN >= 0 && keep.size() >= topN) {
+    if (topN >= 0 && keep.size() >= static_cast<size_t>(topN)) {
       break;
     }
@@ -626,7 +624,7 @@ std::vector<int> soft_nms_cpu_rotated(
   EArrXi pending = AsEArrXt(indices);
   while (pending.size() > 0) {
     // Exit if already enough proposals
-    if (topN >= 0 && keep.size() >= topN) {
+    if (topN >= 0 && keep.size() >= static_cast<size_t>(topN)) {
       break;
     }

@@ -56,7 +56,7 @@ struct TORCH_API CharRange {
 struct TORCH_API StringProvider {
   virtual void operator()(CharRange&) = 0;
   virtual void reset() = 0;
-  virtual ~StringProvider() {}
+  virtual ~StringProvider() = default;
 };
 class TORCH_API BufferedTokenizer {
@@ -99,7 +99,7 @@ class TORCH_API BufferedTokenizer {
   StringProvider* provider_;
   Tokenizer tokenizer_;
   TokenizedString tokenized_;
-  int tokenIndex_;
+  unsigned tokenIndex_;
   int numPasses_;
   int pass_{0};
 };

@@ -18,7 +18,7 @@ void adagrad_update__avx2_fma(
     float decay,
     float lr,
     float weight_decay = 0.f) {
-  constexpr size_t kSize = 8;
+  constexpr int kSize = 8;
   auto i = 0;
   for (; i + kSize <= N; i += kSize) {
     __m256 gi = _mm256_loadu_ps(g + i);

@@ -300,7 +300,7 @@ class GetPythonGradient : public GradientMakerBase {
     }
     if (gradOutputIndices.size() > 0) {
       // NOLINTNEXTLINE(modernize-loop-convert)
-      for (int i = 0; i < gradOutputIndices.size(); ++i) {
+      for (unsigned i = 0; i < gradOutputIndices.size(); ++i) {
        int GO_i = gradOutputIndices[i];
        gradientInputs.push_back(GO(GO_i));
      }
@@ -312,7 +312,7 @@ class GetPythonGradient : public GradientMakerBase {
     std::vector<std::string> gradientOutputs;
     if (gradInputIndices.size() > 0) {
       // NOLINTNEXTLINE(modernize-loop-convert)
-      for (int i = 0; i < gradInputIndices.size(); ++i) {
+      for (unsigned i = 0; i < gradInputIndices.size(); ++i) {
        int GI_i = gradInputIndices[i];
        gradientOutputs.push_back(GI(GI_i));
      }
@@ -877,7 +877,7 @@ void addObjectMethods(py::module& m) {
         std::vector<TensorCPU> tensors_data;
 #ifdef USE_NUMPY
         // NOLINTNEXTLINE(modernize-loop-convert)
-        for (auto i = 0; i < inputs.size(); ++i) {
+        for (auto i = 0U; i < inputs.size(); ++i) {
           auto input = inputs[i];
           CAFFE_ENFORCE(
               PyArray_Check(input.ptr()),
@@ -988,7 +988,7 @@ void addObjectMethods(py::module& m) {
         std::vector<Tensor> tensors_data;
 #ifdef USE_NUMPY
         // NOLINTNEXTLINE(modernize-loop-convert)
-        for (auto i = 0; i < inputs.size(); ++i) {
+        for (auto i = 0U; i < inputs.size(); ++i) {
           auto input = inputs[i];
           CAFFE_ENFORCE(
               PyArray_Check(input.ptr()),
@@ -1201,7 +1201,7 @@ void addGlobalMethods(py::module& m) {
   });
   m.def("nearby_opnames", [](const std::string& name) {
     std::vector<std::string> alternatives;
-    int editTolerance = 3;
+    unsigned editTolerance = 3;
     // NOLINTNEXTLINE(performance-for-range-copy)
     for (auto it : caffe2::CPUOperatorRegistry()->Keys()) {
       if (editDistance(it, name, editTolerance) < editTolerance + 1) {

@@ -195,7 +195,7 @@ void PerfNetObserver::Start() {
   int skipIters = ObserverConfig::getSkipIters();
   int sampleRate = visitCount > 0 ? netFollowupSampleRate : netInitSampleRate;
   // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
-  if (skipIters <= numRuns_ && sampleRate > 0 && rand() % sampleRate == 0) {
+  if (skipIters <= static_cast<int>(numRuns_) && sampleRate > 0 && rand() % sampleRate == 0) {
     visitCount++;
     if (visitCount == netFollowupSampleCount) {
       visitCount = 0;
@@ -238,9 +238,9 @@ void PerfNetObserver::Stop() {
   if (logType_ == PerfNetObserver::OPERATOR_DELAY) {
     const auto& operators = subject_->GetOperators();
-    for (int idx = 0; idx < operators.size(); ++idx) {
+    for (unsigned idx = 0; idx < operators.size(); ++idx) {
       const auto* op = operators[idx];
-      auto name = getObserverName(op, idx);
+      auto name = getObserverName(op, static_cast<int>(idx));
       PerformanceInformation p;
       const PerfOperatorObserver* opObserver =
           static_cast<const PerfOperatorObserver*>(observerMap_[op]);