[AOTInductor] Fix clang-tidy warnings in wrapper (#153197)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/153197
Approved by: https://github.com/desertfire
commit b0f2891e43
parent 3ff22fe2df
Author: Benjamin Glass
Date: 2025-05-12 22:35:53 +00:00
Committed-by: PyTorch MergeBot

5 changed files with 32 additions and 23 deletions

--- a/.lintrunner.toml
+++ b/.lintrunner.toml

@@ -228,6 +228,7 @@ include_patterns = [
     'c10/**/*.cpp',
     'c10/**/*.h',
     'torch/*.h',
+    'torch/_inductor/codegen/aoti_runtime/interface.cpp',
     'torch/csrc/*.h',
     'torch/csrc/*.cpp',
     'torch/csrc/**/*.h',

--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py

@@ -210,8 +210,9 @@ class AOTInductorTestsTemplate:
             AOTIRunnerUtil.legacy_compile, model, example_inputs
         )
         # We should have 1 input, 1 output, 2 constants for the model.
-        check_str = "AOTInductorModelBase(1, 1, 2"
-        FileCheck().check_count(check_str, 1).run(code)
+        FileCheck().check_count("AOTInductorModelBase(1,", 1).check_next(
+            "1,"
+        ).check_next("2,").run(code)

     def test_constant_folding(self):
         class Model(torch.nn.Module):

--- a/torch/_inductor/codegen/aoti_runtime/interface.cpp
+++ b/torch/_inductor/codegen/aoti_runtime/interface.cpp

@@ -4,20 +4,18 @@
 #include <torch/csrc/inductor/aoti_runtime/model_container.h>

 #include <iostream>
-#include <sstream>
-#include <stdexcept>
 #include <vector>

-#define CONVERT_EXCEPTION_TO_ERROR_CODE(...)                 \
-  try {                                                      \
-    __VA_ARGS__                                              \
-  } catch (const std::exception& e) {                        \
-    std::cerr << "Error: " << e.what() << std::endl;         \
-    return AOTI_RUNTIME_FAILURE;                             \
-  } catch (...) {                                            \
-    std::cerr << "Unknown exception occurred." << std::endl; \
-    return AOTI_RUNTIME_FAILURE;                             \
-  }                                                          \
+#define CONVERT_EXCEPTION_TO_ERROR_CODE(...)      \
+  try {                                           \
+    __VA_ARGS__                                   \
+  } catch (const std::exception& e) {             \
+    std::cerr << "Error: " << e.what() << '\n';   \
+    return AOTI_RUNTIME_FAILURE;                  \
+  } catch (...) {                                 \
+    std::cerr << "Unknown exception occurred.\n"; \
+    return AOTI_RUNTIME_FAILURE;                  \
+  }                                               \
   return AOTI_RUNTIME_SUCCESS;

 #define AOTI_VECTOR_SIZE_CHECK(actual_size, expected_size, name) \
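
Note: the macro rewrite replaces std::endl with '\n', which is what clang-tidy's performance-avoid-endl check asks for: std::endl emits a newline and also flushes the stream on every call, and std::cerr is unit-buffered by default, so the extra flush is pure overhead. A minimal standalone sketch of the difference (illustrative, not part of the patch):

    #include <iostream>

    int main() {
      // std::endl writes '\n' and then flushes the stream on every call.
      std::cerr << "newline plus explicit flush" << std::endl;
      // '\n' writes only the newline; std::cerr is unit-buffered by
      // default, so nothing is lost by dropping the flush.
      std::cerr << "newline only" << '\n';
      return 0;
    }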
@@ -36,13 +34,17 @@
 // A RAII, thread local (!) guard that enables or disables grad mode upon
 // construction, and sets it back to the original value upon destruction.
 struct AOTINoGradGuard {
-  AOTINoGradGuard() : prev_mode(aoti_torch_grad_mode_is_enabled()) {
+  AOTINoGradGuard() {
     aoti_torch_grad_mode_set_enabled(false);
   }
+  AOTINoGradGuard(const AOTINoGradGuard&) = delete;
+  AOTINoGradGuard(AOTINoGradGuard&&) noexcept = delete;
   ~AOTINoGradGuard() {
     aoti_torch_grad_mode_set_enabled(prev_mode);
   }
-  bool prev_mode;
+  AOTINoGradGuard& operator=(const AOTINoGradGuard&) = delete;
+  AOTINoGradGuard& operator=(AOTINoGradGuard&&) noexcept = delete;
+  bool prev_mode{aoti_torch_grad_mode_is_enabled()};
 };

 extern "C" {
@@ -65,7 +67,7 @@ AOTIRuntimeError AOTInductorModelContainerCreateWithDevice(
     const char* device_str,
     const char* cubin_dir) {
   if (num_models == 0) {
-    std::cerr << "Error: num_models must be positive, but got 0" << std::endl;
+    std::cerr << "Error: num_models must be positive, but got 0\n";
     return AOTI_RUNTIME_FAILURE;
   }
   CONVERT_EXCEPTION_TO_ERROR_CODE({
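
Note: for context, these entry points form a C ABI (they sit inside extern "C"), so C++ exceptions must not escape them; CONVERT_EXCEPTION_TO_ERROR_CODE turns any throw into an error code printed to stderr. A simplified sketch of the pattern; DemoCreate and the local error-code definitions are hypothetical, only the macro mirrors the patch:

    #include <iostream>
    #include <stdexcept>

    using AOTIRuntimeError = int;
    constexpr AOTIRuntimeError AOTI_RUNTIME_SUCCESS = 0;
    constexpr AOTIRuntimeError AOTI_RUNTIME_FAILURE = 1;

    #define CONVERT_EXCEPTION_TO_ERROR_CODE(...)      \
      try {                                           \
        __VA_ARGS__                                   \
      } catch (const std::exception& e) {             \
        std::cerr << "Error: " << e.what() << '\n';   \
        return AOTI_RUNTIME_FAILURE;                  \
      } catch (...) {                                 \
        std::cerr << "Unknown exception occurred.\n"; \
        return AOTI_RUNTIME_FAILURE;                  \
      }                                               \
      return AOTI_RUNTIME_SUCCESS;

    // Hypothetical entry point: exceptions are caught at the boundary
    // and surfaced to the C caller as an error code.
    extern "C" AOTIRuntimeError DemoCreate(int n) {
      CONVERT_EXCEPTION_TO_ERROR_CODE({
        if (n == 0) {
          throw std::runtime_error("n must be positive");
        }
      })
    }

    int main() {
      return DemoCreate(0) == AOTI_RUNTIME_FAILURE ? 0 : 1;
    }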

--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py

@@ -449,7 +449,7 @@ class CppWrapperCpu(PythonWrapperCodegen):
         # inline done by the host compiler
         self.prefix.splice(
             """
-            bool _check_aoti_runtime_check_inputs_env() {
+            static bool _check_aoti_runtime_check_inputs_env() {
                 const static char* env_var_value = getenv("AOTI_RUNTIME_CHECK_INPUTS");
                 const static bool result = env_var_value != nullptr && env_var_value[0] != '0';
                 return result;
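
Note: marking the generated helper static gives it internal linkage, so tooling no longer expects a matching declaration in a header for a function that is only used within the generated translation unit. The const static locals also ensure the getenv lookup runs once per process. A standalone sketch of the same caching idiom (the function name here is illustrative):

    #include <cstdlib>   // std::getenv
    #include <iostream>

    // Internal linkage: visible only inside this translation unit.
    static bool check_inputs_enabled() {
      // Function-local statics initialize once, on first call, so the
      // environment is read a single time no matter how often this runs.
      static const char* env_var_value = std::getenv("AOTI_RUNTIME_CHECK_INPUTS");
      static const bool result = env_var_value != nullptr && env_var_value[0] != '0';
      return result;
    }

    int main() {
      std::cout << (check_inputs_enabled() ? "checking inputs\n"
                                           : "skipping checks\n");
      return 0;
    }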
@@ -606,7 +606,7 @@ class CppWrapperCpu(PythonWrapperCodegen):
         if not V.graph.is_const_graph:
             self.prefix.writeline("inputs.clear();")
         self.prefix.writeline(
-            "auto& kernels = static_cast<AOTInductorModelKernels&>(*this->kernels_.get());"
+            "[[maybe_unused]] auto& kernels = static_cast<AOTInductorModelKernels&>(*this->kernels_.get());"
         )

     def codegen_tensor_dtype_var_decl(self, code: IndentedBuffer, name):
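
Note: [[maybe_unused]] (C++17) tells the compiler the kernels reference may legitimately go unread, which silences unused-variable diagnostics for generated models whose code never touches it, without affecting models that do. A minimal sketch:

    int compute() { return 42; }

    int main() {
      // The attribute marks an intentionally-unused binding, so
      // -Wunused-variable and clang-tidy stay quiet either way.
      [[maybe_unused]] int value = compute();
      return 0;
    }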
@@ -701,7 +701,12 @@ class CppWrapperCpu(PythonWrapperCodegen):
                 const std::string& device_str,
                 std::optional<std::string> cubin_dir,
                 bool include_weights)
-                : AOTInductorModelBase({num_inputs}, {num_outputs}, {num_constants}, device_str, cubin_dir, {include_weights}) {{
+                : AOTInductorModelBase({num_inputs},
+                                       {num_outputs},
+                                       {num_constants},
+                                       device_str,
+                                       std::move(cubin_dir),
+                                       {include_weights}) {{
             """
         )
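
Note: since cubin_dir is taken by value as a std::optional<std::string>, std::move-ing it into the base constructor forwards the contained string instead of copying it, the usual fix for by-value parameters that are passed onward. A sketch under assumed types; Base and Derived are illustrative stand-ins for AOTInductorModelBase and the generated model:

    #include <optional>
    #include <string>
    #include <utility>

    struct Base {
      explicit Base(std::optional<std::string> cubin_dir)
          : cubin_dir_(std::move(cubin_dir)) {}
      std::optional<std::string> cubin_dir_;
    };

    struct Derived : Base {
      // Take by value, then move onward: at most one copy happens at the
      // call site, and none inside the class hierarchy.
      explicit Derived(std::optional<std::string> cubin_dir)
          : Base(std::move(cubin_dir)) {}
    };

    int main() {
      Derived d(std::optional<std::string>{"/tmp/cubins"});
      return d.cubin_dir_.has_value() ? 0 : 1;
    }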
@@ -812,10 +817,10 @@ class CppWrapperCpu(PythonWrapperCodegen):
             )

         self.prefix.writeline(
-            f'in_spec_ = "{escape_string(config.aot_inductor.serialized_in_spec)}";'
+            f'in_spec_ = R"({config.aot_inductor.serialized_in_spec})";'
         )
         self.prefix.writeline(
-            f'out_spec_ = "{escape_string(config.aot_inductor.serialized_out_spec)}";'
+            f'out_spec_ = R"({config.aot_inductor.serialized_out_spec})";'
         )

         for idx, output in enumerate(V.graph.graph_outputs):
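
Note: emitting the serialized in/out specs as C++ raw string literals lets the generator drop the escape_string pass: inside R"(...)" backslashes and double quotes are taken verbatim, so the spec text embeds unchanged (assuming the payload never contains the closing sequence )", which would terminate the literal early). A standalone sketch with a made-up spec payload:

    #include <iostream>
    #include <string>

    int main() {
      // Quotes and backslashes need no escaping inside a raw literal.
      std::string in_spec = R"([{"type": "tuple", "children_spec": []}])";
      std::cout << in_spec << '\n';
      return 0;
    }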

--- a/torch/_inductor/codegen/cpp_wrapper_cpu_array_ref.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu_array_ref.py

@@ -356,7 +356,7 @@ class CppWrapperCpuArrayRef(CppWrapperCpu):
         else:
             self.prefix.writeline("inputs.clear();")
         self.prefix.writeline(
-            "auto& kernels = static_cast<AOTInductorModelKernels&>(*this->kernels_.get());"
+            "[[maybe_unused]] auto& kernels = static_cast<AOTInductorModelKernels&>(*this->kernels_.get());"
         )

     def generate_return(self, output_refs: list[str]):