[Inductor][CPP] rename shim_mkldnn.h/.cpp to shim_cpu.h/.cpp (#149372)
**Summary**

Previous discussion is here: https://github.com/pytorch/pytorch/pull/148907#issuecomment-2712795600

Rename these files because
- they may hold mkldnn-unrelated code for CPU
- the filenames are then aligned with the corresponding files for CUDA and XPU

Pull Request resolved: https://github.com/pytorch/pytorch/pull/149372
Approved by: https://github.com/leslie-fang-intel, https://github.com/jgong5, https://github.com/desertfire
parent a39bf846f5
commit 64bd889660
```diff
@@ -472,7 +472,7 @@ inductor_core_resources = [
     "torch/csrc/inductor/aoti_runner/model_container_runner.cpp",
     "torch/csrc/inductor/aoti_runner/model_container_runner_cpu.cpp",
     "torch/csrc/inductor/aoti_torch/shim_common.cpp",
-    "torch/csrc/inductor/aoti_torch/shim_mkldnn.cpp",
+    "torch/csrc/inductor/aoti_torch/shim_cpu.cpp",
     "torch/csrc/inductor/aoti_torch/tensor_converter.cpp",
     "torch/csrc/inductor/aoti_torch/mkldnn_tensor.cpp",
     "torch/csrc/inductor/aoti_torch/oss_proxy_executor.cpp",
```
```diff
@@ -291,7 +291,7 @@ class ConvolutionUnary(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     @classmethod
```
```diff
@@ -349,7 +349,7 @@ class ConvolutionBinary(ExternKernelAlloc):
         self.cpp_constant_args = cpp_constant_args

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     @classmethod
```
```diff
@@ -420,7 +420,7 @@ class ConvolutionBinaryInplace(ExternKernelAlloc):
         ]

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:
```
```diff
@@ -489,7 +489,7 @@ class ConvolutionTransposeUnary(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     @classmethod
```
```diff
@@ -567,7 +567,7 @@ class QConvPointWisePT2E(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)
         if isinstance(self.layout, Layout):
             self.codegen_size_asserts(wrapper)
```
```diff
@@ -672,7 +672,7 @@ class QConvPointWiseBinaryPT2E(ExternKernelAlloc):
        )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)
         if isinstance(self.layout, Layout):
             self.codegen_size_asserts(wrapper)
```
```diff
@@ -782,7 +782,7 @@ class MKLPackedLinear(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     @classmethod
```
```diff
@@ -826,7 +826,7 @@ class LinearUnary(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     @classmethod
```
```diff
@@ -879,7 +879,7 @@ class LinearBinary(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

     @classmethod
```
```diff
@@ -943,7 +943,7 @@ class QLinearPointwisePT2E(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

         if isinstance(self.layout, Layout):
```
```diff
@@ -1027,7 +1027,7 @@ class QLinearPointwiseBinaryPT2E(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)
         if isinstance(self.layout, Layout):
             self.codegen_size_asserts(wrapper)
```
```diff
@@ -1225,11 +1225,11 @@ class MkldnnRnnLayer(ExternKernelAlloc):
         return output_ir

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         return super().codegen(wrapper)


-# Add this IR so that we can include shim_mkldnn.h for cpp_wrapper
+# Add this IR so that we can include shim_cpu.h for cpp_wrapper
 class WeightInt4PackMatmul(ExternKernelAlloc):
     def __init__(
         self,
```
```diff
@@ -1253,7 +1253,7 @@ class WeightInt4PackMatmul(ExternKernelAlloc):
         )

     def codegen(self, wrapper):
-        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h")
+        wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
         super().codegen(wrapper)

         if isinstance(self.layout, Layout):
```
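Each `codegen` hook above exists only to pull the shim header into Inductor's generated C++ wrapper source via `include_extra_header`. As a sketch of the net effect (the rest of the generated file is elided and varies by model and build), a generated cpp_wrapper source now starts roughly like this:

```cpp
// Sketch of the include section of a generated cpp_wrapper file after
// this change; the surrounding generated code is elided.
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
#include <torch/csrc/inductor/aoti_torch/c/shim_cpu.h>  // previously shim_mkldnn.h
```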
```diff
@@ -1,5 +1,5 @@
-#ifndef AOTI_TORCH_SHIM_MKLDNN
-#define AOTI_TORCH_SHIM_MKLDNN
+#ifndef AOTI_TORCH_SHIM_CPU
+#define AOTI_TORCH_SHIM_CPU

 #include <ATen/Config.h>
 #include <torch/csrc/inductor/aoti_torch/c/shim.h>
```
```diff
@@ -248,4 +248,4 @@ AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_int4pack_mm_cpu_tensor(
 #ifdef __cplusplus
 } // extern "C"
 #endif
-#endif // AOTI_TORCH_SHIM_MKLDNN
+#endif // AOTI_TORCH_SHIM_CPU
```
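Pieced together from the two header hunks above, the renamed `shim_cpu.h` keeps the old file's shape. A minimal sketch follows; the opening of the `extern "C"` block is inferred from the closing brace shown in the diff, and the argument list of the one visible declaration stays elided, as it is in the diff:

```cpp
#ifndef AOTI_TORCH_SHIM_CPU
#define AOTI_TORCH_SHIM_CPU

#include <ATen/Config.h>
#include <torch/csrc/inductor/aoti_torch/c/shim.h>

#ifdef __cplusplus
extern "C" {
#endif

// C-ABI entry points are declared here, e.g. (arguments elided in this diff):
AOTI_TORCH_EXPORT AOTITorchError aoti_torch_cpu__weight_int4pack_mm_cpu_tensor(
    /* ... */);

#ifdef __cplusplus
} // extern "C"
#endif
#endif // AOTI_TORCH_SHIM_CPU
```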
```diff
@@ -1,5 +1,5 @@
 
-#include <torch/csrc/inductor/aoti_torch/c/shim_mkldnn.h>
+#include <torch/csrc/inductor/aoti_torch/c/shim_cpu.h>
 #include <torch/csrc/inductor/aoti_torch/utils.h>
 
 #ifndef AT_PER_OPERATOR_HEADERS
```
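The .cpp side only needed its own include updated. For orientation, a hypothetical entry point in the renamed `shim_cpu.cpp` would follow the usual AOTI shim shape, assuming the `AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE` helper from `utils.h`; the function name and arguments below are invented for illustration and only compile inside the PyTorch source tree:

```cpp
#include <torch/csrc/inductor/aoti_torch/c/shim_cpu.h>
#include <torch/csrc/inductor/aoti_torch/utils.h>

// Hypothetical example; the real entry points and their argument lists
// live in shim_cpu.cpp and are not shown in this diff.
AOTITorchError aoti_torch_cpu__example_op(
    AtenTensorHandle self,
    AtenTensorHandle* ret) {
  AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({
    // Convert the incoming handles to at::Tensor, call into ATen,
    // and hand the result back through ret as an owning handle.
  });
}
```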