From 606b234336228828b42c83ac4bc04511b6cfa9c0 Mon Sep 17 00:00:00 2001
From: Michael Andreas Dagitses
Date: Thu, 9 Jun 2022 13:15:50 -0700
Subject: [PATCH] turn on -Werror=unused-function in our Bazel CPU build

Summary: We also fix all existing violations.

Note that we only do this for the CPU build: Bazel treats nvcc as a C++
toolchain, but nvcc does not support the same warning flags, so adding
them to the GPU build would cause nvcc errors.

Test Plan: Built locally; rely on CI to confirm.

Reviewers: malfet

Subscribers:

Tasks:

Tags:

Pull Request resolved: https://github.com/pytorch/pytorch/pull/79154
Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
---
 .bazelrc                                      | 18 +++++-
 aten/src/ATen/NamedTensorUtils.cpp            | 57 -----------------
 aten/src/ATen/native/BinaryOps.cpp            | 20 ------
 aten/src/ATen/native/ReduceOps.cpp            | 12 ----
 .../ATen/native/quantized/AffineQuantizer.cpp |  5 --
 c10/test/util/exception_test.cpp              |  8 +--
 caffe2/ideep/operators/adam_op.cc             | 25 --------
 caffe2/opt/onnxifi_transformer.cc             | 22 -------
 test/cpp/jit/torch_python_test.cpp            | 41 ++++++------
 test/cpp/tensorexpr/test_conv.cpp             |  4 +-
 test/cpp/tensorexpr/tutorial.cpp              |  4 ++
 torch/csrc/DynamicTypes.cpp                   | 16 -----
 torch/csrc/Storage.cpp                        | 14 ----
 torch/csrc/autograd/init.cpp                  | 12 ----
 torch/csrc/jit/codegen/cuda/codegen.cpp       |  6 --
 torch/csrc/jit/codegen/cuda/index_compute.cpp | 13 ----
 torch/csrc/jit/codegen/cuda/ir_nodes.cpp      | 12 ----
 torch/csrc/jit/codegen/cuda/kernel_cache.cpp  |  5 --
 .../csrc/jit/codegen/cuda/lower_expr_sort.cpp | 64 ++++++++++---------
 torch/csrc/jit/codegen/cuda/parser.cpp        |  4 --
 .../mobile/compatibility/backport_manager.cpp | 15 -----
 torch/csrc/jit/mobile/module.cpp              |  3 +
 .../jit/passes/onnx/shape_type_inference.cpp  | 21 ------
 torch/csrc/jit/runtime/static/ops.cpp         |  6 --
 .../csrc/jit/serialization/export_module.cpp  | 33 ----------
 torch/csrc/utils/tensor_new.cpp               | 14 ----
 26 files changed, 82 insertions(+), 372 deletions(-)

diff --git a/.bazelrc b/.bazelrc
index 60d6fda7a70..6b97431739e 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -49,5 +49,19 @@ build:cpu-only --@rules_cuda//cuda:enable_cuda=False
 # On the bright side, this means we don't have to more broadly apply
 # the exceptions to an entire target.
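+# A note on syntax for future readers: Bazel's --per_file_copt takes a
+# comma-separated list of regexes that are matched against each source
+# file's path, then an '@', then the compiler options to add when a
+# regex matches. The .cu patterns below wrap each warning flag in
+# --compiler-options so that nvcc forwards it to the host C++ compiler.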
 build \
-  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
-  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
+  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits \
+  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-function \
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-function
+
+build \
+  --per_file_copt=//:aten/src/ATen/RegisterCompositeExplicitAutograd.cpp@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterCompositeImplicitAutograd.cpp@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterMkldnnCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterNestedTensorCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterQuantizedCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterSparseCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterSparseCsrCPU.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:aten/src/ATen/RegisterZeroTensor.cpp$@-Wno-error=unused-function \
+  --per_file_copt=//:torch/csrc/lazy/generated/RegisterAutogradLazy.cpp@-Wno-error=unused-function \
+  --per_file_copt=//:torch/csrc/lazy/generated/RegisterLazy.cpp@-Wno-error=unused-function
diff --git a/aten/src/ATen/NamedTensorUtils.cpp b/aten/src/ATen/NamedTensorUtils.cpp
index 31ff41efb13..ca38f7be31b 100644
--- a/aten/src/ATen/NamedTensorUtils.cpp
+++ b/aten/src/ATen/NamedTensorUtils.cpp
@@ -260,33 +260,6 @@ std::vector<Dimname> compute_diagonal_outnames(
   return outnames;
 }
 
-// tensor_dotted_dim and other_dotted_dim are the dimensions of the two
-// tensors that we contract together. Usually other_dotted_dim is 0
-// and tensor_dotted_dim is the last dim of tensor, but there are some special
-// cases like einsum and tensordot where one can contract arbitrary dims.
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static std::vector<Dimname> compute_dot_product_outnames(
-    DimnameList tensor_names,
-    int64_t tensor_dotted_dim,
-    DimnameList other_names,
-    int64_t other_dotted_dim) {
-  int64_t num_outnames = tensor_names.size() + other_names.size() - 2;
-  if (num_outnames == 0) {
-    return {};
-  }
-  std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
-  int64_t index = 0;
-  for (const auto j : c10::irange(static_cast<int64_t>(tensor_names.size()))) {
-    if (j == tensor_dotted_dim) continue;
-    outnames[index++] = tensor_names[j];
-  }
-  for (const auto j : c10::irange(static_cast<int64_t>(other_names.size()))) {
-    if (j == other_dotted_dim) continue;
-    outnames[index++] = other_names[j];
-  }
-  return outnames;
-}
-
 static void check_feature_names_are_distinct(
     DimnameList self_names,
     DimnameList other_names,
@@ -306,36 +279,6 @@ static void check_feature_names_are_distinct(
       ". Please rename the input tensors with `Tensor.rename` to prevent this.");
 }
 
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static DimnameList batch_dims(DimnameList names) {
-  if (names.size() <= 2) {
-    return {};
-  }
-  return DimnameList(names.begin(), names.end() - 2);
-}
-
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static DimnameList feature_dims(DimnameList names) {
-  if (names.size() <= 2) {
-    return names;
-  }
-  return DimnameList(names.end() - 2, 2);
-}
-
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static bool are_distinct(DimnameList batch_dims, DimnameList feature_dims) {
-  for (const auto& target : feature_dims) {
-    if (target.isWildcard()) {
-      continue;
-    }
-    if (std::any_of(batch_dims.begin(), batch_dims.end(),
-          [&](const Dimname& dim) { return target == dim; })) {
-      return false;
-    }
-  }
-  return true;
-}
-
 static int64_t num_batch_dims(DimnameList names) {
   if (names.size() <= 2) {
     return 0;
diff --git a/aten/src/ATen/native/BinaryOps.cpp b/aten/src/ATen/native/BinaryOps.cpp
index 5a4800e7d65..807170026a2 100644
--- a/aten/src/ATen/native/BinaryOps.cpp
+++ b/aten/src/ATen/native/BinaryOps.cpp
@@ -12,26 +12,6 @@
 #include
 
 namespace at {
-namespace native {
-
-// These are still needed because we don't have C++ conversions from number
-// types (int, float, etc.) to Tensor (only to Scalar). They're not exposed
-// to Python.
-
-static void check_convert(const Scalar& scalar, ScalarType scalarType) {
-  // Validate that is possible to convert scalar to tensor dtype without
-  // overflow
-  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
-      at::ScalarType::Bool,
-      at::ScalarType::BFloat16,
-      at::ScalarType::Half,
-      at::ScalarType::ComplexHalf,
-      scalarType,
-      "check_convert",
-      [&] { scalar.to<scalar_t>(); });
-}
-
-} // namespace native
 
 namespace meta {
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index 2015c161d2e..a62c89863e2 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -1111,18 +1111,6 @@ Tensor nansum(const Tensor& self, IntArrayRef dim, bool keepdim, c10::optional opt_dtype) {
-  ScalarType dtype = get_dtype_from_result(result, opt_dtype);
-  auto iter = make_reduction("prod", result, self, dim, keepdim, dtype);
-  if (iter.numel() == 0) {
-    result.fill_(1);
-  } else {
-    prod_stub(iter.device_type(), iter);
-  }
-  return result;
-}
-
 // NOTE: this could be implemented via diag and sum, but this has perf problems,
 // see https://github.com/pytorch/pytorch/pull/47305,
 Tensor trace_cpu(const Tensor& self) {
diff --git a/aten/src/ATen/native/quantized/AffineQuantizer.cpp b/aten/src/ATen/native/quantized/AffineQuantizer.cpp
index 0e15534b227..e2fa8f65adc 100644
--- a/aten/src/ATen/native/quantized/AffineQuantizer.cpp
+++ b/aten/src/ATen/native/quantized/AffineQuantizer.cpp
@@ -35,11 +35,6 @@ void checkRoundingMode(const std::string& fn_name) {
   return;
 }
 
-void checkCPUTensor(const std::string& fn_name, const Tensor& t) {
-  TORCH_CHECK(
-      t.device().type() == kCPU, fn_name, " only supports CPU device type.");
-}
-
 void checkFloatTensor(const std::string& fn_name, const Tensor& t) {
   TORCH_CHECK(
       t.scalar_type() == kFloat, fn_name, " expects a Float Tensor, got ",
diff --git a/c10/test/util/exception_test.cpp b/c10/test/util/exception_test.cpp
index af06b4cb90a..0fc7abe982e 100644
--- a/c10/test/util/exception_test.cpp
+++ b/c10/test/util/exception_test.cpp
@@ -5,9 +5,6 @@ using c10::Error;
 
 namespace {
-bool throw_func() {
-  throw std::runtime_error("I'm throwing...");
-}
 
 template <class Functor>
 inline void expectThrowsEq(Functor&& functor, const char* expectedMessage) {
@@ -26,9 +23,10 @@ TEST(ExceptionTest, TORCH_INTERNAL_ASSERT_DEBUG_ONLY) {
 #ifdef NDEBUG
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
   ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false));
-  // Does nothing - `throw_func()` should not be evaluated
+  // Does nothing - `throw ...` should not be evaluated
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
-  ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(throw_func()));
+  ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+      (throw std::runtime_error("I'm throwing..."), true)));
 #else
   ASSERT_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false), c10::Error);
   ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(true));
diff --git a/caffe2/ideep/operators/adam_op.cc b/caffe2/ideep/operators/adam_op.cc
index a8d43f46c2c..8228e7d92cf 100644
--- a/caffe2/ideep/operators/adam_op.cc
+++ b/caffe2/ideep/operators/adam_op.cc
@@ -4,31 +4,6 @@ using namespace caffe2;
 
 namespace {
 
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-void adam_ideep_update(
-    int N,
-    const float* g,
-    const float* m,
-    const float* v,
-    float* ng,
-    float* nm,
-    float* nv,
-    float beta1,
-    float beta2,
-    float eps_hat,
-    float correction,
-    const float* lr) {
-#ifdef _OPENMP
-  #pragma omp parallel for schedule(static)
-#endif
-  for (auto i = 0; i < N; ++i) {
-    float gi = g[i];
-    float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
-    float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
-    ng[i] = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);
-  }
-}
-
 void adam_ideep_compute(
     int N,
     const float* w,
diff --git a/caffe2/opt/onnxifi_transformer.cc b/caffe2/opt/onnxifi_transformer.cc
index 2af9088b988..0ccbc5e5c28 100644
--- a/caffe2/opt/onnxifi_transformer.cc
+++ b/caffe2/opt/onnxifi_transformer.cc
@@ -31,28 +31,6 @@ std::unordered_map stripShapeInfoMap(
   return shape_map;
 }
 
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-uint64_t onnxifiDataType(caffe2::TensorProto::DataType t) {
-#define CAFFE2_TO_ONNXIFI_TYPE(x, y) \
-  case (caffe2::TensorProto::x):     \
-    return y
-  switch (t) {
-    CAFFE2_TO_ONNXIFI_TYPE(FLOAT, ONNXIFI_DATATYPE_FLOAT32);
-    CAFFE2_TO_ONNXIFI_TYPE(INT8, ONNXIFI_DATATYPE_INT8);
-    CAFFE2_TO_ONNXIFI_TYPE(UINT8, ONNXIFI_DATATYPE_UINT8);
-    CAFFE2_TO_ONNXIFI_TYPE(INT16, ONNXIFI_DATATYPE_INT16);
-    CAFFE2_TO_ONNXIFI_TYPE(UINT16, ONNXIFI_DATATYPE_UINT16);
-    CAFFE2_TO_ONNXIFI_TYPE(INT32, ONNXIFI_DATATYPE_INT32);
-    CAFFE2_TO_ONNXIFI_TYPE(INT64, ONNXIFI_DATATYPE_INT64);
-    CAFFE2_TO_ONNXIFI_TYPE(FLOAT16, ONNXIFI_DATATYPE_FLOAT16);
-    default:
-      LOG(WARNING) << "Unsupported Caffe2 tensor type: " << t
-                   << ", fallback to FLOAT";
-      return ONNXIFI_DATATYPE_FLOAT32;
-  }
-#undef CAFFE2_TO_ONNXIFI_TYPE
-}
-
 std::vector<::ONNX_NAMESPACE::ValueInfoProto> convertToValueInfo(
     const std::vector& names,
     const std::unordered_map& shape_hints,
diff --git a/test/cpp/jit/torch_python_test.cpp b/test/cpp/jit/torch_python_test.cpp
index 74b93436688..14193f6168b 100644
--- a/test/cpp/jit/torch_python_test.cpp
+++ b/test/cpp/jit/torch_python_test.cpp
@@ -34,29 +34,30 @@ void testEvalModeForLoadedModule() {
   AT_ASSERT(module.attr("dropout").toModule().is_training());
 }
 
-void testSerializationInterop() {
-  if (isSandcastle()) {
-    // The module file to load is not generated in Sandcastle
-    return;
-  }
+// TODO: this test never ran before and is broken.
+// void testSerializationInterop() {
+//   if (isSandcastle()) {
+//     // The module file to load is not generated in Sandcastle
+//     return;
+//   }
 
-  // This should be generated by `test/cpp/jit/tests_setup.py`
-  std::ifstream input_stream("ivalue.pt");
-  std::vector<char> input;
-  input.insert(
-      input.begin(),
-      std::istream_iterator<char>(input_stream),
-      std::istream_iterator<char>());
-  IValue ivalue = pickle_load(input);
+//   // This should be generated by `test/cpp/jit/tests_setup.py`
+//   std::ifstream input_stream("ivalue.pt");
+//   std::vector<char> input;
+//   input.insert(
+//       input.begin(),
+//       std::istream_iterator<char>(input_stream),
+//       std::istream_iterator<char>());
+//   IValue ivalue = pickle_load(input);
 
-  auto elements = ivalue.toTupleRef().elements();
-  auto ones = torch::ones({2, 2});
-  AT_ASSERT(ones.equal(elements.at(0).toTensor()));
+//   auto elements = ivalue.toTupleRef().elements();
+//   auto ones = torch::ones({2, 2});
+//   AT_ASSERT(ones.equal(elements.at(0).toTensor()));
 
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-  auto twos = torch::ones({3, 5}) * 2;
-  AT_ASSERT(twos.equal(elements.at(1).toTensor()));
-}
+//   // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
+//   auto twos = torch::ones({3, 5}) * 2;
+//   AT_ASSERT(twos.equal(elements.at(1).toTensor()));
+// }
 
 void testTorchSaveError() {
   if (isSandcastle()) {
diff --git a/test/cpp/tensorexpr/test_conv.cpp b/test/cpp/tensorexpr/test_conv.cpp
index cf458af0209..e72303873a6 100644
--- a/test/cpp/tensorexpr/test_conv.cpp
+++ b/test/cpp/tensorexpr/test_conv.cpp
@@ -12,14 +12,14 @@ namespace jit {
 namespace te = torch::jit::tensorexpr;
 namespace F = torch::nn::functional;
 
+#ifdef TORCH_ENABLE_LLVM
+
 // Generate test data with few bits of precision, to minimize error
 // accumulation from floating-point reordering.
 static at::Tensor genTestData(c10::IntArrayRef args) {
   return at::trunc(at::randn(args) * 256.0f) / 256.0f;
 }
 
-#ifdef TORCH_ENABLE_LLVM
-
 TEST(Conv, DepthwiseConv2D) {
   constexpr int N = 1, C = 72, H = 56, W = 56;
   constexpr int K = 72, R = 3, S = 3;
diff --git a/test/cpp/tensorexpr/tutorial.cpp b/test/cpp/tensorexpr/tutorial.cpp
index e34d980cf70..3f4c32af463 100644
--- a/test/cpp/tensorexpr/tutorial.cpp
+++ b/test/cpp/tensorexpr/tutorial.cpp
@@ -54,9 +54,13 @@
 
 using namespace torch::jit::tensorexpr;
 
+#ifdef TORCH_ENABLE_LLVM
+
 // Helper function to print a snippet from a big multi-line string
 static void printLinesToFrom(const std::string& input_str, int from, int to);
 
+#endif
+
 int main(int argc, char* argv[]) {
   std::cout << "*** Structure of tensor expressions and statements ***"
             << std::endl;
diff --git a/torch/csrc/DynamicTypes.cpp b/torch/csrc/DynamicTypes.cpp
index 3ae7b8415e5..c929f383211 100644
--- a/torch/csrc/DynamicTypes.cpp
+++ b/torch/csrc/DynamicTypes.cpp
@@ -28,22 +28,6 @@ std::array<THPDtype*, static_cast<int>(at::ScalarType::NumOptions)> dtype_regist
 std::array<THPLayout*, static_cast<int>(at::Layout::NumOptions)> layout_registry = {};
 
-at::Backend get_backend(bool is_cuda, bool is_sparse) {
-  if (is_cuda) {
-    if (is_sparse){
-      return at::Backend::SparseCUDA;
-    } else {
-      return at::Backend::CUDA;
-    }
-  } else {
-    if (is_sparse){
-      return at::Backend::SparseCPU;
-    } else {
-      return at::Backend::CPU;
-    }
-  }
-}
-
 at::DeprecatedTypeProperties* get_type_properties(at::DeviceType device_type, at::ScalarType scalarType) {
   at::Backend backend;
   if (device_type == at::kCPU) {
diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp
index 8de5ae5564c..1df0fa16c5e 100644
--- a/torch/csrc/Storage.cpp
+++ b/torch/csrc/Storage.cpp
@@ -337,20 +337,6 @@ static PyObject * THPStorage_device(THPStorage* self, void *unused) {
   END_HANDLE_TH_ERRORS
 }
 
-static PyObject * THPStorage_dtype(THPStorage *self, void *unused)
-{
-  HANDLE_TH_ERRORS
-  return torch::autograd::utils::wrap(
-      torch::getTHPDtype(at::typeMetaToScalarType(
-#ifdef THQUANTIZED
-          caffe2::TypeMeta::Make()
-#else
-          caffe2::TypeMeta::Make()
-#endif
-          )));
-  END_HANDLE_TH_ERRORS
-}
-
 typedef PyObject *(*getter)(PyObject *, void *);
 
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index f6c1bc74711..fb333d2629c 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -480,18 +480,6 @@ static PyObject * set_autocast_cpu_dtype(PyObject* _unused, PyObject *arg) {
   END_HANDLE_TH_ERRORS
 }
 
-static const char* scalarTypeName(const at::ScalarType type) {
-  switch (type) {
-#define DEFINE_CASE(ctype, name) \
-  case at::ScalarType::name:     \
-    return #ctype;
-    AT_FORAUTOCAST_SCALAR_TYPES(DEFINE_CASE)
-#undef DEFINE_CASE
-    default:
-      throw std::runtime_error("unknown scalar type for autocast");
-  }
-}
-
 static PyObject * get_autocast_gpu_dtype(PyObject* _unused, PyObject *arg){
   HANDLE_TH_ERRORS
   at::ScalarType current_dtype = at::autocast::get_autocast_gpu_dtype();
diff --git a/torch/csrc/jit/codegen/cuda/codegen.cpp b/torch/csrc/jit/codegen/cuda/codegen.cpp
index 9aee0cc4c52..70d6c308f9e 100644
--- a/torch/csrc/jit/codegen/cuda/codegen.cpp
+++ b/torch/csrc/jit/codegen/cuda/codegen.cpp
@@ -27,12 +27,6 @@ std::string ptrType(DataType dt) {
   return ss.str();
 }
 
-std::string refType(DataType dt) {
-  std::stringstream ss;
-  ss << dt << "&";
-  return ss.str();
-}
-
 //! Utility class to build an argument list
 class ArgumentBuilder {
  public:
diff --git a/torch/csrc/jit/codegen/cuda/index_compute.cpp b/torch/csrc/jit/codegen/cuda/index_compute.cpp
index a000dca87a1..93f9e3cf899 100644
--- a/torch/csrc/jit/codegen/cuda/index_compute.cpp
+++ b/torch/csrc/jit/codegen/cuda/index_compute.cpp
@@ -2407,19 +2407,6 @@ std::vector getPredicateContigIds(
   return contig_id_infos;
 }
 
-IterDomain* getMappedReferenceDomain(
-    IterDomain* id,
-    const ReferenceTensor& reference) {
-  // Partially overlaps with getPredicateContigIds()
-  auto concrete_id = GpuLower::current()->caMap()->getConcreteMappedID(
-      id, IdMappingMode::EXACT);
-  auto it = reference.concrete_to_id.find(concrete_id);
-  if (it == reference.concrete_to_id.end()) {
-    return nullptr;
-  }
-  return it->second;
-}
-
 std::vector getNonDivisibleConsumerDomainsToPredicate(
     TensorView* consumer_tv) {
   const auto& non_divisible_split_info =
diff --git a/torch/csrc/jit/codegen/cuda/ir_nodes.cpp b/torch/csrc/jit/codegen/cuda/ir_nodes.cpp
index f2c366e24c5..9bd5381b2c9 100644
--- a/torch/csrc/jit/codegen/cuda/ir_nodes.cpp
+++ b/torch/csrc/jit/codegen/cuda/ir_nodes.cpp
@@ -1351,18 +1351,6 @@ TensorDomain::TensorDomain(const TensorDomain* src, IrCloner* ir_cloner)
       contiguity_(src->contiguity()),
       has_nontrivial_reduction_(src->has_nontrivial_reduction_) {}
 
-namespace {
-std::vector lowerIterDomains(
-    const std::vector& domains) {
-  std::vector lowered_domains;
-  lowered_domains.reserve(domains.size());
-  for (const auto iter_domain : domains) {
-    lowered_domains.push_back(iter_domain);
-  }
-  return lowered_domains;
-};
-} // namespace
-
 bool TensorDomain::hasBlockBroadcast() const {
   return std::any_of(domain_.begin(), domain_.end(), [](IterDomain* id) {
     return id->isBroadcast() && id->isThreadDim();
diff --git a/torch/csrc/jit/codegen/cuda/kernel_cache.cpp b/torch/csrc/jit/codegen/cuda/kernel_cache.cpp
index 9d75362f4c6..f4585e940f2 100644
--- a/torch/csrc/jit/codegen/cuda/kernel_cache.cpp
+++ b/torch/csrc/jit/codegen/cuda/kernel_cache.cpp
@@ -42,11 +42,6 @@ int getCommonDeviceCUDA(const at::ArrayRef& inputs) {
   return index;
 }
 
-// TODO: temporary hack to resolve my is_constructible issue;
-std::vector<int64_t> toVector(const at::DimVector& small_vec) {
-  return std::vector<int64_t>(small_vec.begin(), small_vec.end());
-}
-
 void encodeBuffer(size_t value, std::string& buffer) {
   const char* v = reinterpret_cast<const char*>(&value);
   for (const auto i : c10::irange(sizeof(size_t))) {
diff --git a/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp b/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp
index 281fa05bb2b..ecf7320b4fd 100644
--- a/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp
+++ b/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp
@@ -555,39 +555,41 @@ ExprGroup* ExprSegmentationSorter::makeEmptyGroup(Expr* expr) {
 }
 
 // Debug function that prints the current state of the sorter.
-std::string ExprSegmentationSorter::toString(int verbosity) const {
-  std::stringstream ss;
-  ss << "{\n";
-  for (auto& group : groups_) {
-    ss << " " << group.get() << "\n";
+//
+// Uncomment if needed.
+// std::string ExprSegmentationSorter::toString(int verbosity) const {
+//   std::stringstream ss;
+//   ss << "{\n";
+//   for (auto& group : groups_) {
+//     ss << " " << group.get() << "\n";
 
-    if (verbosity > 1) {
-      if (group->producerEdges().size() > 0) {
-        ss << "Produced by groups with edges: { \n";
-        for (auto producer_edge : group->producerEdges()) {
-          ss << producer_edge->producer_val_ << " -> "
-             << producer_edge->consumer_val_ << "\n";
-        }
-        ss << " }"
-           << "\n";
-      }
-    }
+//     if (verbosity > 1) {
+//       if (group->producerEdges().size() > 0) {
+//         ss << "Produced by groups with edges: { \n";
+//         for (auto producer_edge : group->producerEdges()) {
+//           ss << producer_edge->producer_val_ << " -> "
+//              << producer_edge->consumer_val_ << "\n";
+//         }
+//         ss << " }"
+//            << "\n";
+//       }
+//     }
 
-    if (verbosity > 1) {
-      if (group->consumerEdges().size() > 0) {
-        ss << "Consumed by groups with edges: { \n";
-        for (auto consumer_edge : group->consumerEdges()) {
-          ss << consumer_edge->producer_val_ << " -> "
-             << consumer_edge->consumer_val_ << "\n";
-        }
-        ss << " }"
-           << "\n";
-      }
-    }
-  }
-  ss << "}\n";
-  return ss.str();
-}
+//     if (verbosity > 1) {
+//       if (group->consumerEdges().size() > 0) {
+//         ss << "Consumed by groups with edges: { \n";
+//         for (auto consumer_edge : group->consumerEdges()) {
+//           ss << consumer_edge->producer_val_ << " -> "
+//              << consumer_edge->consumer_val_ << "\n";
+//         }
+//         ss << " }"
+//            << "\n";
+//       }
+//     }
+//   }
+//   ss << "}\n";
+//   return ss.str();
+// }
 
 namespace {
diff --git a/torch/csrc/jit/codegen/cuda/parser.cpp b/torch/csrc/jit/codegen/cuda/parser.cpp
index 169d41bb875..c98d4eb8954 100644
--- a/torch/csrc/jit/codegen/cuda/parser.cpp
+++ b/torch/csrc/jit/codegen/cuda/parser.cpp
@@ -386,10 +386,6 @@ struct MemoryCompare {
   }
 };
 
-bool operator==(const MemoryFormat& a, const MemoryFormat& b) {
-  return a.permutation_ == b.permutation_;
-};
-
 typedef std::map MemoryFormatMap;
 
 MemoryFormat operator+(const MemoryFormat& a, const MemoryFormat& b) {
diff --git a/torch/csrc/jit/mobile/compatibility/backport_manager.cpp b/torch/csrc/jit/mobile/compatibility/backport_manager.cpp
index e6413ceb030..2bad08c0765 100644
--- a/torch/csrc/jit/mobile/compatibility/backport_manager.cpp
+++ b/torch/csrc/jit/mobile/compatibility/backport_manager.cpp
@@ -77,21 +77,6 @@ void selective_copy(
   }
 }
 
-// Copy all content from reader to stringstream
-void get_model_stream(PyTorchStreamReader& reader, std::stringstream& out) {
-  auto writer_func = [&](const void* buf, size_t nbytes) -> size_t {
-    out.write(static_cast<const char*>(buf), nbytes);
-    return !out ? 0 : nbytes;
-  };
-  PyTorchStreamWriter writer(writer_func);
-
-  selective_copy(
-      reader,
-      writer,
-      std::unordered_set(),
-      std::unordered_set());
-}
-
 // The write_archive_current function is used for bytecode from version v5 to
 // v7 (the latest bytecode version). pre-v5 we serialized things differently.
 // This write archive function may change in export_module.cpp, however we don't
diff --git a/torch/csrc/jit/mobile/module.cpp b/torch/csrc/jit/mobile/module.cpp
index 1483af299c4..2ef7c34c28b 100644
--- a/torch/csrc/jit/mobile/module.cpp
+++ b/torch/csrc/jit/mobile/module.cpp
@@ -107,6 +107,7 @@ void slot_named_params_recurse(
   }
 }
 
+#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
 std::string getTopModuleTypeName(const Module& m) {
   std::string name;
   if (m._ivalue()->type() && m._ivalue()->type()->name()) {
@@ -114,6 +115,8 @@ std::string getTopModuleTypeName(const Module& m) {
   }
   return name;
 }
+#endif
+
 } // namespace
 
 const std::vector Module::parameters() const {
diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
index 09271986bff..d5dbfe8cf5e 100644
--- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
+++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
@@ -377,27 +377,6 @@ void ConvertGraphToONNXProto(
   }
 }
 
-// this function checks wheather the blocks of If node have the same return
-// type.
-bool IsBlockReturnTypeSame(Node* n) {
-  TORCH_INTERNAL_ASSERT(n->kind() == ::c10::onnx::If);
-  auto then_block = n->blocks()[0];
-  auto else_block = n->blocks()[1];
-  for (const auto i : c10::irange(n->outputs().size())) {
-    // check the type
-    auto then_block_type = then_block->outputs()[i]->type();
-    auto else_block_type = else_block->outputs()[i]->type();
-    if (then_block_type->cast<TensorType>() &&
-        else_block_type->cast<TensorType>()) {
-      if (then_block_type->castRaw<TensorType>()->scalarType() !=
-          else_block_type->castRaw<TensorType>()->scalarType()) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
 c10::optional ComputeConstantFolding(Node* n, int opset_version) {
   if (n->inputs().size() == 0) {
     return c10::nullopt;
diff --git a/torch/csrc/jit/runtime/static/ops.cpp b/torch/csrc/jit/runtime/static/ops.cpp
index cac0ebdc0fe..65f47776749 100644
--- a/torch/csrc/jit/runtime/static/ops.cpp
+++ b/torch/csrc/jit/runtime/static/ops.cpp
@@ -2642,12 +2642,6 @@ void signed_log1p_out(at::Tensor& out, const at::Tensor& input) {
   });
 }
 
-at::Tensor signed_log1p(const at::Tensor& input) {
-  auto out = create_empty_from(input);
-  signed_log1p_out(out, input);
-  return out;
-}
-
 } // namespace
 
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
diff --git a/torch/csrc/jit/serialization/export_module.cpp b/torch/csrc/jit/serialization/export_module.cpp
index d0bfc84af23..b4e1f62a6c2 100644
--- a/torch/csrc/jit/serialization/export_module.cpp
+++ b/torch/csrc/jit/serialization/export_module.cpp
@@ -345,17 +345,6 @@ void pushMobileFunctionsToIValues(
   }
 }
 
-std::unordered_set<const FunctionSchema*> getInterfaceCalls(Graph& graph) {
-  std::unordered_set<const FunctionSchema*> ret;
-  auto nodes = findAllNodes(graph, c10::prim::CallMethod, true);
-  for (Node* node : nodes) {
-    if (auto iface = node->input(0)->type()->castRaw<InterfaceType>()) {
-      ret.insert(iface->getMethod(node->s(attr::name)));
-    }
-  }
-  return ret;
-}
-
 struct ModuleMethod {
   ModuleMethod(const Module& m, const GraphFunction& f, c10::QualifiedName n)
       : module(m), function(f), exportName(std::move(n)) {}
@@ -364,28 +353,6 @@ struct ModuleMethod {
   c10::QualifiedName exportName;
 };
 
-std::vector<ModuleMethod> getModuleInterfaceExports(
-    const Module& module,
-    const std::unordered_set<const FunctionSchema*>& schemas) {
-  if (schemas.size() == 0) {
-    return {};
-  }
-  std::unordered_set<std::string> names;
-  for (auto schema : schemas) {
-    names.insert(schema->name());
-  }
-  std::vector<ModuleMethod> ret;
-  for (const auto& submodule : module.modules()) {
-    for (const auto& method : submodule.get_methods()) {
-      const auto& f = toGraphFunction(method.function());
-      if (names.find(f.qualname().name()) != names.end()) {
-        ret.emplace_back(submodule, f, f.qualname());
-      }
-    }
-  }
-  return ret;
-}
-
 bool isLoweredModule(const Module& m) {
   c10::QualifiedName type_name;
   if (m.type()->name()) {
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index d11d7cd563e..2e19a5e6ba5 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -70,12 +70,6 @@ void maybe_initialize_cuda(const Device device) {
 // options.
 // TODO: Refactor this so we just pass everything in via options
 
-Tensor dispatch_ones(c10::TensorOptions options, at::ScalarType scalar_type, const optional<Device>& device, IntArrayRef sizes) {
-  maybe_initialize_cuda(options.device());
-  pybind11::gil_scoped_release no_gil;
-  return torch::ones(sizes, build_options(options, scalar_type, device));
-}
-
 Tensor new_with_sizes(c10::TensorOptions options, at::ScalarType scalar_type, const optional<Device>& device, IntArrayRef sizes) {
   maybe_initialize_cuda(options.device());
   pybind11::gil_scoped_release no_gil;
@@ -469,14 +463,6 @@ Tensor legacy_sparse_tensor_generic_ctor_new(c10::DispatchKey dispatch_key, at::
   throw std::runtime_error("new(): invalid arguments");
 }
 
-Tensor legacy_sparse_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) {
-  return legacy_sparse_tensor_generic_ctor_new(dispatch_key, scalar_type, args, kwargs, CtorOrNew::CTOR);
-}
-
-Tensor legacy_sparse_tensor_new(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) {
-  return legacy_sparse_tensor_generic_ctor_new(dispatch_key, scalar_type, args, kwargs, CtorOrNew::NEW);
-}
-
 // NB: device_idx here is NOT a DeviceIndex, but index into PythonArgs
 c10::TensorOptions typeIdWithDefault(PythonArgs& r, int64_t device_idx, c10::DispatchKey dispatch_key) {
   auto options = dispatchKeyToTensorOptions(dispatch_key);