Summary: When enabled, this will generate the `torch_cuda_linalg` library, which depends on cusolver and magma and registers dynamic bindings to it from LinearAlgebraStubs.

Avoid symbol clashes that can result in infinite recursion by moving all symbols in the library into its own namespace. Add checks to `LinearAlgebraStubs.cpp` that should prevent the stubs from calling themselves recursively.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/73447
Reviewed By: albanD
Differential Revision: D34538827
Pulled By: malfet
fbshipit-source-id: f2535b471d3524768a84b2e169b6aa24c26c03bf
(cherry picked from commit 4ec24b079c861c1122f0fa86e280b977c3c2f7ac)
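Because the bindings are registered lazily through LinearAlgebraStubs, the CUDA linear-algebra backend is only loaded the first time a linalg op actually runs on a CUDA tensor. A minimal sketch of exercising that path from Python, assuming a build with the new library enabled (the specific op and tensor shape are illustrative, not part of this change):

import torch

# Assuming torch_cuda_linalg was built and the lazy stubs are registered,
# the first linalg call on a CUDA tensor should trigger loading the backend
# (cusolver/magma) rather than recursing back into the stub itself.
if torch.cuda.is_available():
    a = torch.randn(4, 4, device="cuda")
    torch.linalg.inv(a)  # first call dispatches through LinearAlgebraStubs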
import sys
import torch.cuda
import os

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import IS_WINDOWS

if sys.platform == 'win32':
    vc_version = os.getenv('VCToolsVersion', '')
    if vc_version.startswith('14.16.'):
        CXX_FLAGS = ['/sdl']
    else:
        CXX_FLAGS = ['/sdl', '/permissive-']
else:
    CXX_FLAGS = ['-g']

USE_NINJA = os.getenv('USE_NINJA') == '1'

ext_modules = [
    CppExtension(
        'torch_test_cpp_extension.cpp', ['extension.cpp'],
        extra_compile_args=CXX_FLAGS),
    CppExtension(
        'torch_test_cpp_extension.ort', ['ort_extension.cpp'],
        extra_compile_args=CXX_FLAGS),
    CppExtension(
        'torch_test_cpp_extension.rng', ['rng_extension.cpp'],
        extra_compile_args=CXX_FLAGS),
]

if torch.cuda.is_available() and (CUDA_HOME is not None or ROCM_HOME is not None):
    extension = CUDAExtension(
        'torch_test_cpp_extension.cuda', [
            'cuda_extension.cpp',
            'cuda_extension_kernel.cu',
            'cuda_extension_kernel2.cu',
        ],
        extra_compile_args={'cxx': CXX_FLAGS,
                            'nvcc': ['-O2']})
    ext_modules.append(extension)

if torch.cuda.is_available() and (CUDA_HOME is not None or ROCM_HOME is not None):
    extension = CUDAExtension(
        'torch_test_cpp_extension.torch_library', [
            'torch_library.cu'
        ],
        extra_compile_args={'cxx': CXX_FLAGS,
                            'nvcc': ['-O2']})
    ext_modules.append(extension)

# todo(mkozuki): Figure out the root cause
if (not IS_WINDOWS) and torch.cuda.is_available() and CUDA_HOME is not None:
    # malfet: One should not assume that PyTorch re-exports CUDA dependencies
    cublas_extension = CUDAExtension(
        name='torch_test_cpp_extension.cublas_extension',
        sources=['cublas_extension.cpp'],
        libraries=['cublas'] if torch.version.hip is None else [],
    )
    ext_modules.append(cublas_extension)

    cusolver_extension = CUDAExtension(
        name='torch_test_cpp_extension.cusolver_extension',
        sources=['cusolver_extension.cpp'],
        libraries=['cusolver'] if torch.version.hip is None else [],
    )
    ext_modules.append(cusolver_extension)

setup(
    name='torch_test_cpp_extension',
    packages=['torch_test_cpp_extension'],
    ext_modules=ext_modules,
    include_dirs='self_compiler_include_dirs_test',
    cmdclass={'build_ext': BuildExtension.with_options(use_ninja=USE_NINJA)})
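For context, these are the ahead-of-time test extensions; once built, the test suite imports them as ordinary modules. A minimal sketch of building and importing one of them, assuming the commands run from this setup.py's directory (the build invocation and import path are inferred from the extension names above, not something this script does by itself):

import subprocess
import sys

# Build the declared extensions in place so the shared objects land next to
# the torch_test_cpp_extension package.
subprocess.check_call([sys.executable, "setup.py", "build_ext", "--inplace"])

# Import one of the freshly built modules to confirm it loads.
import torch_test_cpp_extension.cpp as cpp_extension
print(cpp_extension.__file__)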