Fix pytorch linux build issues (#9273)
Summary: Broken out of #8338. This fixes the build issues with PyTorch on Linux machines after BUILD_CAFFE2 and BUILD_ATEN are removed. cc orionr
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9273
Reviewed By: orionr
Differential Revision: D8768869
Pulled By: mingzhe09088
fbshipit-source-id: 2730426ed1bed398eb5dc804c7348aeeb27c93d3
This commit is contained in:
parent d0ad696f9d
commit a70a90b28f
@@ -201,12 +201,14 @@ if(NOT MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-field-initializers")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-type-limits")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-array-bounds")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unknown-pragmas")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-compare")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-variable")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-function")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-result")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-strict-aliasing")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
   # These flags are not available in GCC-4.8.5. Set only when using clang.
   # Compared against https://gcc.gnu.org/onlinedocs/gcc-4.8.5/gcc/Option-Summary.html
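The two newly suppressed diagnostics, -Wno-array-bounds and -Wno-strict-aliasing, target GCC warnings that tend to fire in low-level tensor and generated code. As a minimal, hypothetical C++ sketch of the kind of type-punning pattern -Wstrict-aliasing reports (illustrative only, not the PyTorch source that triggered the suppression):

#include <cstdint>
#include <cstdio>

// Hypothetical illustration: reading a float's bits through a pointer of an
// unrelated type breaks strict aliasing, and GCC reports it under
// -Wstrict-aliasing (typically at -O2).
static std::uint32_t float_bits(float f) {
  return *reinterpret_cast<std::uint32_t*>(&f);  // type-punned dereference
}

int main() {
  std::printf("bits of 1.0f: 0x%08x\n", float_bits(1.0f));
  return 0;
}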
@@ -10,7 +10,7 @@ static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(real *input_p, real *o
 {
   int k;
   int has_error = 0;
-  THIndex_t error_index;
+  THIndex_t error_index = 0;
 #pragma omp parallel for private(k)
   for (k = 0; k < nslices; k++)
   {
@@ -67,7 +67,7 @@ static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
 {
   int k;
   int has_error = 0;
-  THIndex_t error_index;
+  THIndex_t error_index = 0;
 #pragma omp parallel for private(k)
   for (k = 0; k < nslices; k++)
   {
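Both unpooling hunks make the same change: error_index is only assigned inside the parallel loop when an out-of-range index is found, so the later read that formats the error message can trip GCC's -Wmaybe-uninitialized, which breaks the build when warnings are treated as errors. Initializing the variable to 0 gives it a defined value on every path. A minimal, hypothetical sketch of the pattern (names are illustrative, not the actual THNN code):

#include <cstdio>

int main() {
  const int nslices = 8;
  int has_error = 0;
  long error_index = 0;  // initialized: defined even if no iteration writes it

  #pragma omp parallel for
  for (int k = 0; k < nslices; k++) {
    if (k == 5) {        // stand-in for "target index out of range"
      has_error = 1;
      error_index = k;   // only written on the error path
    }
  }

  if (has_error) {
    // Without the "= 0" above, this read is what -Wmaybe-uninitialized flags.
    std::fprintf(stderr, "found an invalid index %ld\n", error_index);
    return 1;
  }
  return 0;
}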
@@ -78,7 +78,7 @@ Y: [0.3005476 1.551666 1.3591481 0.39191285 0.21866608]
 </details>
 
 )DOC")
-    .Input(0, "X", "*(type: Tensor<float\\>)* Input tensor.")
+    .Input(0, "X", "*(type: Tensor<float>)* Input tensor.")
     .Output(
         0,
         "Y",
@@ -132,7 +132,7 @@ bool UnpackSegmentsOp<CPUContext>::DoRunWithType2() {
   output->Resize(shape);
   // create output tensor
   auto* out = static_cast<char*>(output->raw_mutable_data(data.meta()));
-  if (!(data.dim(0) * data.dim(1))) {
+  if (!(data.dim(0) && data.dim(1))) {
     return true;
   }
   auto block_size = data.size_from_dim(2);
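The intent of this check is an early return when there is nothing to unpack, i.e. when either of the first two dimensions is zero. Writing it as dim(0) && dim(1) states that directly instead of multiplying two sizes just to test for zero. A minimal, hypothetical sketch of the equivalent logic outside Caffe2 (dims is a stand-in for the operator's input shape, not the real API):

#include <cstdint>
#include <vector>

// Hypothetical helper: true when either of the first two dimensions is zero,
// so the unpack loop below it would have no work to do.
static bool nothing_to_unpack(const std::vector<std::int64_t>& dims) {
  // Equivalent to the fixed check `!(dim(0) && dim(1))`; the old form
  // `!(dim(0) * dim(1))` tested the same condition via a needless multiply.
  return dims.size() < 2 || dims[0] == 0 || dims[1] == 0;
}

int main() {
  return nothing_to_unpack({0, 4, 3}) ? 0 : 1;  // empty first dim -> early out
}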
@@ -385,7 +385,7 @@ lengths_out: [5]
     .Output(
         0,
         "data_out",
-        "*(type: Tensor)* Padded data tensor ($T<N + 2*padding\\_width, "
+        "*(type: Tensor)* Padded data tensor ($T<N + 2*padding_width, "
         "D_1, ..., D_n>$).")
     .Output(
         1,
@@ -483,7 +483,7 @@ lengths_out_rm: [3]
         0,
         "data_out",
         "*(type: Tensor)* Padded data tensor "
-        "($T<N + 2*padding\\_width, D_1, ..., D_n>$).")
+        "($T<N + 2*padding_width, D_1, ..., D_n>$).")
     .Output(
         1,
         "lengths_out",
@@ -276,10 +276,11 @@ function(target_enable_style_warnings TARGET)
       -Wmissing-include-dirs
       -Woverloaded-virtual
       -Wredundant-decls
+      -Wno-shadow
       -Wsign-promo
       -Wstrict-overflow=5
       -fdiagnostics-show-option
-      -Wconversion
+      -Wno-conversion
       -Wpedantic
       -Wundef
   )
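This hunk relaxes two diagnostics in the style-warning list: -Wno-shadow stops complaints about an inner declaration reusing an outer name, and -Wno-conversion stops complaints about implicit narrowing conversions. A minimal, hypothetical C++ sketch of the kind of code each diagnostic reports (illustrative only, not the source that triggered the change):

#include <cstdint>

// Hypothetical illustration of the two relaxed diagnostics.
static int shadow_and_narrow(const int* data, int n) {
  int value = 0;
  for (int i = 0; i < n; i++) {
    int value = data[i];       // -Wshadow: inner `value` hides the outer one
    (void)value;
  }
  std::int64_t wide = 1 << 20;
  value = wide;                // -Wconversion: implicit int64_t -> int
  return value;
}

int main() {
  const int data[3] = {1, 2, 3};
  return shadow_and_narrow(data, 3) == (1 << 20) ? 0 : 1;
}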
@@ -301,5 +302,8 @@ function(target_enable_style_warnings TARGET)
       list(APPEND WARNING_OPTIONS "-Werror")
     endif()
   endif()
+  if(APPLE)
+    set(WARNING_OPTIONS -Wno-gnu-zero-variadic-macro-arguments)
+  endif()
   target_compile_options(${TARGET} PRIVATE ${WARNING_OPTIONS})
 endfunction()
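The new APPLE branch silences Clang's -Wgnu-zero-variadic-macro-arguments, which fires when a variadic macro is invoked with no variadic arguments and relies on the GNU ##__VA_ARGS__ extension to swallow the trailing comma. A minimal, hypothetical sketch of the pattern (the macro name is illustrative):

#include <cstdio>

// GNU extension: `##__VA_ARGS__` drops the trailing comma when no variadic
// arguments are passed. Clang warns about the zero-argument use under
// -Wgnu-zero-variadic-macro-arguments.
#define LOG_MSG(fmt, ...) std::fprintf(stderr, fmt "\n", ##__VA_ARGS__)

int main() {
  LOG_MSG("plain message");     // zero variadic arguments -> the warning
  LOG_MSG("value = %d", 42);    // ordinary use, no warning
  return 0;
}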