Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Misc Windows lint
Summary: Closes https://github.com/caffe2/caffe2/pull/1656

Differential Revision: D6633052

Pulled By: Yangqing

fbshipit-source-id: 5eeb3912fc769cfd06d252f3ed1d8d5f2a207cfc
This commit is contained in:
parent 1a0eefd5fc
commit efa7c895f6

.gitignore (vendored)
@@ -55,6 +55,7 @@
 # Visual Studio Code files
 .vscode
+.vs
 
 # OSX dir files
 .DS_Store
@@ -108,7 +108,6 @@ struct DefCompiler {
     return env[ident.name()];
   }
   void emitAssignment(const Assign& stmt) {
-    OperatorDef* op;
     std::vector<std::string> outputs;
     for (auto lhs : stmt.lhs()) {
       std::string name = getLHS(lhs);
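The deleted local OperatorDef* op; is never referenced in the function body, which MSVC reports as warning C4101 (unreferenced local variable); removing the declaration is the standard fix. A minimal sketch of the warning (hypothetical function, not from the source):

    void Example() {
      int unused;  // MSVC C4101: unreferenced local variable under /W3
    }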
@@ -762,7 +761,7 @@ struct CompilationUnitImpl {
   }
 
  private:
-  friend class DefCompiler;
+  friend struct DefCompiler;
   SymbolTable functions;
 };
 
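DefCompiler is defined with the struct class-key (see the hunk above), so the friend declaration must say struct as well; MSVC warns about class/struct mismatches as C4099 (type name first seen using 'struct' now seen using 'class'). A minimal sketch of the fix:

    struct DefCompiler;  // defined elsewhere as a struct

    struct CompilationUnitImpl {
     private:
      // "friend class DefCompiler;" here would trigger MSVC C4099.
      friend struct DefCompiler;
    };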
@@ -835,7 +835,8 @@ void TensorPrinter::Print(const Tensor<CPUContext>& tensor) {
   std::stringstream values_stream;
   // One most likely doesn't want to print int64-number of items for visual
   // inspection, so we cast down to int here.
-  int total_count = std::min(tensor.size(), TIndex(limit_));
+  int total_count = static_cast<int>(
+      std::min(tensor.size(), TIndex(limit_)));
   const T* tensor_data = tensor.template data<T>();
   for (int i = 0; i < total_count - 1; ++i) {
     values_stream << tensor_data[i] << ",";
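tensor.size() returns TIndex, a 64-bit integer, so storing the std::min result in an int narrows implicitly and MSVC flags it as C4244 (possible loss of data); the static_cast documents that the truncation is intended. A minimal sketch, assuming TIndex is int64_t and with a hypothetical Clamp helper:

    #include <algorithm>
    #include <cstdint>

    using TIndex = int64_t;  // assumption: Caffe2's TIndex is 64-bit

    int Clamp(TIndex size, TIndex limit) {
      // Implicit int64_t -> int narrowing would warn (MSVC C4244).
      return static_cast<int>(std::min(size, limit));
    }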
@@ -39,7 +39,8 @@ class Timer {
    */
  inline void Start() { start_time_ = clock::now(); }
  inline float NanoSeconds() {
-    return std::chrono::duration_cast<ns>(clock::now() - start_time_).count();
+    return static_cast<float>(
+        std::chrono::duration_cast<ns>(clock::now() - start_time_).count());
   }
   /**
    * @brief Returns the elapsed time in milliseconds.
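The same pattern applies to std::chrono: duration_cast<ns>(...).count() returns the duration's 64-bit integer representation, so returning it from a function declared float is another implicit narrowing. A self-contained sketch (ElapsedNs is a hypothetical stand-in for Timer::NanoSeconds):

    #include <chrono>

    float ElapsedNs(std::chrono::steady_clock::time_point start) {
      using ns = std::chrono::nanoseconds;
      // count() yields a 64-bit integer; returning it as float
      // without a cast triggers MSVC C4244.
      return static_cast<float>(
          std::chrono::duration_cast<ns>(
              std::chrono::steady_clock::now() - start).count());
    }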
@@ -110,7 +110,7 @@ bool BatchBoxCoxOp<CPUContext>::DoRunWithType() {
   const auto* lambda1_ptr = lambda1.template data<T>();
   const auto* lambda2_ptr = lambda2.template data<T>();
 
-  const T k_eps = 1e-6;
+  const T k_eps = static_cast<T>(1e-6);
 
 #ifdef CAFFE2_USE_MKL
   if (min_block_size_ < 1) {
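Here T is a template parameter, so an f suffix would be wrong when T is double; static_cast<T>(1e-6) converts the double literal to whichever type T is instantiated with. A minimal sketch of the pattern:

    template <typename T>
    T Epsilon() {
      // "return 1e-6;" narrows when T = float (MSVC C4244), while
      // "return 1e-6f;" would bake in float precision even for T = double.
      return static_cast<T>(1e-6);
    }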
@@ -99,7 +99,7 @@ bool L1DistanceGradientOp<float, CPUContext>::RunOnDevice() {
     for (int j = 0; j < D; ++j) {
       const float temp =
           (X.data<float>())[offset + j] - (Y.data<float>())[offset + j];
-      const float kEps = 1e-12;
+      const float kEps = 1e-12f;
       if (temp < -kEps) {
         dX->mutable_data<float>()[offset + j] = -(dDistance.data<float>())[i];
         dY->mutable_data<float>()[offset + j] = (dDistance.data<float>())[i];
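This hunk and most of the ones that follow apply the same fix: a bare literal such as 1e-12 has type double, so initializing a float from it is a double-to-float narrowing that MSVC reports; the f suffix makes the constant a float from the start. Sketch:

    const float kEpsWarns = 1e-12;   // implicit double -> float (MSVC C4244)
    const float kEpsClean = 1e-12f;  // float literal, no conversion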
@@ -45,7 +45,7 @@ class HSoftmaxOpBase : public Operator<Context> {
   Tensor<Context> sum_multiplier_;
   Tensor<Context> bias_multiplier_;
   static constexpr T kLOG_THRESHOLD() {
-    return 1e-20;
+    return 1e-20f;
   }
   static std::unordered_map<int, PathProto> getHierarchyForLabels(
       int M,
@@ -132,7 +132,7 @@ class HSoftmaxSearchOp final : public HSoftmaxOp<T, Context> {
   HSoftmaxSearchOp(const OperatorDef& operator_def, Workspace* ws)
       : HSoftmaxOp<T, Context>(operator_def, ws),
         top_n_(OperatorBase::GetSingleArgument<int>("topN", 5)),
-        beam_(OperatorBase::GetSingleArgument<float>("beam", 0.01)) {
+        beam_(OperatorBase::GetSingleArgument<float>("beam", 0.01f)) {
     CAFFE_ENFORCE(tree_.ParseFromString(
         OperatorBase::GetSingleArgument<string>("tree", "")));
   }
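Default arguments to templated getters follow the same rule: GetSingleArgument<float>(name, 0.01) converts the double literal to float at the call site. A sketch with GetArg as a hypothetical stand-in for OperatorBase::GetSingleArgument:

    #include <string>

    // Hypothetical stand-in; takes and returns its default by T.
    template <typename T>
    T GetArg(const std::string& /*name*/, T default_value) {
      return default_value;
    }

    float Beam() {
      // GetArg<float>("beam", 0.01) converts double -> float (C4244);
      // the f suffix supplies a float to begin with.
      return GetArg<float>("beam", 0.01f);
    }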
@@ -68,7 +68,7 @@ class IndexHashOp : public Operator<Context> {
     for (int i = 0; i < sizeof(T) / sizeof(int8_t); i++) {
       hashed = hashed * 65537 + bytes[i];
     }
-    hashed = (modulo_ + hashed % modulo_) % modulo_;
+    hashed = static_cast<T>((modulo_ + hashed % modulo_) % modulo_);
     return hashed;
   }
 
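In IndexHashOp the remainder arithmetic is carried out at the width of modulo_, so assigning the result back to T is a potential narrowing; the static_cast states that wrapping into T's range is deliberate. A sketch, assuming modulo_ is a 64-bit integer:

    #include <cstdint>

    template <typename T>
    T WrapToRange(T hashed, int64_t modulo) {
      // The expression promotes to int64_t; assigning it back to a
      // narrower T would warn (MSVC C4244) without the cast.
      return static_cast<T>((modulo + hashed % modulo) % modulo);
    }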
@@ -29,7 +29,7 @@ class InstanceNormOp : public Operator<Context> {
   USE_OPERATOR_CONTEXT_FUNCTIONS;
   InstanceNormOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
-        epsilon_(OperatorBase::GetSingleArgument<T>("epsilon", 1e-5)),
+        epsilon_(OperatorBase::GetSingleArgument<T>("epsilon", 1e-5f)),
         order_(StringToStorageOrder(
             OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
     CAFFE_ENFORCE(epsilon_ >= 0, "Must pass a nonnegative epsilon.");
@@ -69,7 +69,7 @@ class InstanceNormGradientOp : public Operator<Context> {
   USE_OPERATOR_CONTEXT_FUNCTIONS;
   InstanceNormGradientOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
-        epsilon_(OperatorBase::GetSingleArgument<T>("epsilon", 1e-5)),
+        epsilon_(OperatorBase::GetSingleArgument<T>("epsilon", 1e-5f)),
         order_(StringToStorageOrder(
             OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
     CAFFE_ENFORCE(epsilon_ >= 0, "Must pass a nonnegative epsilon.");
@@ -55,7 +55,7 @@ class LayerNormGradientOp : public Operator<Context> {
   LayerNormGradientOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
         axis_(OperatorBase::GetSingleArgument<int>("axis", 1)),
-        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 0.001)) {}
+        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 0.001f)) {}
   ~LayerNormGradientOp() {}
 
   template <typename T>
@@ -29,7 +29,7 @@
 namespace caffe2 {
 
 namespace {
-const float kEqualityThreshold = 1e-10;
+const float kEqualityThreshold = 1e-10f;
 }
 
 template <
@@ -19,7 +19,7 @@
 namespace caffe2 {
 struct LogitCPUFunctor {
   explicit LogitCPUFunctor(OperatorBase& op)
-      : eps_(op.GetSingleArgument<float>("eps", 1e-6)) {
+      : eps_(op.GetSingleArgument<float>("eps", 1e-6f)) {
     CAFFE_ENFORCE_GT(eps_, 0.0);
     CAFFE_ENFORCE_LT(eps_, 0.5);
   }
@@ -10,7 +10,7 @@ class LogitGradientOp final : public Operator<Context> {
   USE_OPERATOR_CONTEXT_FUNCTIONS;
   LogitGradientOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
-        eps_(OperatorBase::GetSingleArgument<float>("eps", 1e-6)) {}
+        eps_(OperatorBase::GetSingleArgument<float>("eps", 1e-6f)) {}
   ~LogitGradientOp() {}
 
   bool RunOnDevice() override;
@@ -44,7 +44,7 @@ bool LpNormGradientOp<float, CPUContext>::RunOnDevice() {
   CAFFE_ENFORCE_EQ(dnorm.ndim(), 1);
   CAFFE_ENFORCE_EQ(dnorm.dim32(0), 1);
   dX->ResizeLike(X);
-  const float kEps = 1e-12;
+  const float kEps = 1e-12f;
 
   if (p_ == 1) {
     // Todo: implement in eigen
@@ -60,7 +60,7 @@ bool LpNormGradientOp<float, CPUContext>::RunOnDevice() {
     }
   } else if (p_ == 2) {
     EigenVectorMap<float>(dX->mutable_data<float>(), X.size()).array() =
-        ConstEigenVectorMap<float>(X.data<float>(), X.size()).array() * 2.0 *
+        ConstEigenVectorMap<float>(X.data<float>(), X.size()).array() * 2.0f *
         (dnorm.data<float>())[0];
   }
 
@@ -28,7 +28,7 @@ class RMACRegionsOp final : public Operator<Context> {
   RMACRegionsOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
         scales_(OperatorBase::GetSingleArgument<int>("scales", 3)),
-        overlap_(OperatorBase::GetSingleArgument<float>("overlap", 0.4)) {}
+        overlap_(OperatorBase::GetSingleArgument<float>("overlap", 0.4f)) {}
 
   USE_OPERATOR_CONTEXT_FUNCTIONS;
 
@@ -31,16 +31,22 @@ void GatherPaddingOp<CPUContext>::GatherPadding(
     const int* lengths_ptr,
     T* padding_start_ptr,
     T* padding_end_ptr) {
+  CAFFE_ENFORCE(
+      (!std::is_same<bool, T>::value),
+      "GatherPadding should not be executed on an input of type bool, as "
+      "addition is not properly defined with booleans.");
   int64_t total_length = 0;
   for (int i = 0; i < lengths_size; ++i) {
     // check total length consistency
     const auto length = lengths_ptr[i];
     total_length += length;
     CAFFE_ENFORCE_LE(total_length, outer_size);
 
     // accumulate start paddings
     for (int j = 0; j < startPaddingWidth_; ++j) {
       for (int k = 0; k < block_size; ++k) {
+        // Note: MSVC warns about unsafe use of type bool in operation.
+        // This is now guarded by a CAFFE_ENFORCE so we can suppress it.
+#pragma warning(suppress: 4804)
         padding_start_ptr[k] += in_ptr[k];
       }
       in_ptr += block_size;
@@ -49,6 +55,7 @@ void GatherPaddingOp<CPUContext>::GatherPadding(
     // accumulate end paddings
     for (int j = 0; j < endPaddingWidth_; ++j) {
       for (int k = 0; k < block_size; ++k) {
+#pragma warning(suppress: 4804)
         padding_end_ptr[k] += in_ptr[k];
       }
       in_ptr += block_size;
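Warning C4804 (unsafe use of type bool in operation) fires when the += is instantiated with T = bool. The new CAFFE_ENFORCE rejects bool inputs up front, so suppressing the warning at the two call sites is safe. Note that #pragma warning(suppress: N) covers only the next statement, unlike disable, which stays in effect until re-enabled. A minimal sketch of the idiom, using a different warning number:

    // suppress applies to the single following line; non-MSVC
    // compilers generally ignore the pragma.
    void Accumulate(short* out, const short* in, int n) {
      for (int i = 0; i < n; ++i) {
    #pragma warning(suppress: 4244)  // short += short narrows int -> short
        out[i] += in[i];
      }
    }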
@@ -62,7 +62,7 @@ void SoftmaxCPU(
   for (int i = 0; i < N; ++i) {
     for (int j = 0; j < D; ++j) {
       Ydata[i * D + j] =
-          Xdata[i * D + j] - rowmax[i] - log(fmaxf(scale[i], 1e-20));
+          Xdata[i * D + j] - rowmax[i] - log(fmaxf(scale[i], 1e-20f));
     }
   }
 }
@@ -30,8 +30,8 @@ class SpatialBNOp : public Operator<Context> {
   SpatialBNOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
         is_test_(OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)),
-        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)),
-        momentum_(OperatorBase::GetSingleArgument<float>("momentum", 0.9)),
+        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5f)),
+        momentum_(OperatorBase::GetSingleArgument<float>("momentum", 0.9f)),
         order_(StringToStorageOrder(
             OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
     // TODO(jiayq): update the input and output size checks.
@@ -63,7 +63,7 @@ class SpatialBNGradientOp : public Operator<Context> {
   SpatialBNGradientOp(const OperatorDef& operator_def, Workspace* ws)
       : Operator<Context>(operator_def, ws),
         is_test_(OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)),
-        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)),
+        epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5f)),
         order_(StringToStorageOrder(
             OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
     CAFFE_ENFORCE(InputSize() == 5);
@@ -228,7 +228,7 @@ class WeightedSampleDequeueBlobsOp final : public Operator<Context> {
   }
   std::partial_sum(cumProbs_.begin(), cumProbs_.end(), cumProbs_.begin());
   // Put last value to be 1.0001 to avoid numerical issues.
-  cumProbs_.back() = 1.0001;
+  cumProbs_.back() = 1.0001f;
 
   LOG(INFO) << "Dequeue weights: " << weights;
   LOG(INFO) << "cumProbs: " << cumProbs_;
@@ -106,16 +106,27 @@ cmake_pop_check_state()
 # "THIRD_PARTY_NAME related"
 if (${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC")
   add_compile_options(
-      /wd4018 # (3): Signed/unsigned mismatch
+      ##########################################
+      # Third party related. Cannot remove.
+      ##########################################
       /wd4065 # (3): switch with default but no case. Protobuf related.
-      /wd4244 # (2/3/4): Possible loss of precision
-      /wd4267 # (3): Conversion of size_t to smaller type. Possible loss of data.
-      /wd4503 # (1): decorated name length exceeded, name was truncated. Eigen related.
+      /wd4503 # (1): decorated name length exceeded, name was truncated.
+              # Eigen related.
       /wd4506 # (1): no definition for inline function. Protobuf related.
-      /wd4554 # (3): check operator precedence for possible error. Eigen related.
-      /wd4800 # (3): Forcing non-boolean value to true or false.
-      /wd4996 # (3): Use of a deprecated member
+      /wd4554 # (3): check operator precedence for possible error.
+              # Eigen related.
+      ##########################################
+      # These are directly Caffe2 related.
+      ##########################################
+      /wd4018 # (3): Signed/unsigned mismatch. We've used it in many places of
+              # the code and it would be hard to correct all.
+      /wd4244 # (2/3/4): Possible loss of precision. Various cases where we
+              # implicitly cast TIndex to int etc. Need further cleaning.
+      /wd4267 # (3): Conversion of size_t to smaller type. Same reason as 4244.
+      /wd4996 # (3): Use of deprecated POSIX functions. Since we develop
+              # mainly on Linux, this is ignored.
   )
 
 # Exception handing for compiler warining C4530, see
 # https://msdn.microsoft.com/en-us/library/2axwkyt4.aspx
 add_definitions("/EHsc")