Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/70851
This is a step towards OSS/fbcode convergence since OSS uses this file
in both CMake and Bazel.
ghstack-source-id: 147170896
Test Plan: Relying on the extensive CI internal tests for this.
Reviewed By: malfet
Differential Revision: D33299102
fbshipit-source-id: c650dd4755f8d696d5fce81c583d5c73782e3990
(cherry picked from commit 741ca140c8)
45 lines, 1.2 KiB, C
#pragma once

#ifndef C10_USING_CUSTOM_GENERATED_MACROS

// We have not yet modified the AMD HIP build to generate this file so
// we add an extra option to specifically ignore it.
#ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE
#include <c10/cuda/impl/cuda_cmake_macros.h>
#endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE

#endif

// See c10/macros/Export.h for a detailed explanation of what the function
// of these macros is. We need one set of macros for every separate library
// we build.

#ifdef _WIN32
#if defined(C10_CUDA_BUILD_SHARED_LIBS)
#define C10_CUDA_EXPORT __declspec(dllexport)
#define C10_CUDA_IMPORT __declspec(dllimport)
#else
#define C10_CUDA_EXPORT
#define C10_CUDA_IMPORT
#endif
#else // _WIN32
#if defined(__GNUC__)
#define C10_CUDA_EXPORT __attribute__((__visibility__("default")))
#else // defined(__GNUC__)
#define C10_CUDA_EXPORT
#endif // defined(__GNUC__)
#define C10_CUDA_IMPORT C10_CUDA_EXPORT
#endif // _WIN32

// This one is being used by libc10_cuda.so
#ifdef C10_CUDA_BUILD_MAIN_LIB
#define C10_CUDA_API C10_CUDA_EXPORT
#else
#define C10_CUDA_API C10_CUDA_IMPORT
#endif

/**
 * The maximum number of GPUs that we recognize.
 */
#define C10_COMPILE_TIME_MAX_GPUS 16
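
As a rough illustration of how these export macros are consumed, the sketch below declares a function with C10_CUDA_API. The function name device_count_sketch and the include path are assumptions for illustration, not code from the repository; the point is that a single declaration expands to dllexport inside the libc10_cuda build (where C10_CUDA_BUILD_MAIN_LIB is defined), to dllimport on Windows consumers of the shared library, and to default visibility (or nothing) on GCC-compatible builds.

// example_api.h -- hypothetical header belonging to libc10_cuda
#pragma once
#include <c10/cuda/CUDAMacros.h> // assumed path of the macros header shown above

namespace c10 {
namespace cuda {
// Inside the libc10_cuda build this expands to the EXPORT form;
// in code that merely links against the library it expands to the IMPORT form.
C10_CUDA_API int device_count_sketch();
} // namespace cuda
} // namespace c10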
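
C10_COMPILE_TIME_MAX_GPUS is a compile-time upper bound, so its natural use is to size fixed per-device storage. Below is a minimal sketch under that assumption; the allocation_counts table and note_allocation function are hypothetical and exist only to show the pattern.

// Hypothetical per-device bookkeeping sized by the compile-time GPU cap.
#include <array>
#include <cstdint>
#include <c10/cuda/CUDAMacros.h> // assumed path of the macros header shown above

namespace {
// One slot per device index that the library is willing to track.
std::array<std::int64_t, C10_COMPILE_TIME_MAX_GPUS> allocation_counts{};
} // namespace

void note_allocation(int device_index) {
  // Ignore indices outside the compile-time bound rather than overrun the table.
  if (device_index >= 0 && device_index < C10_COMPILE_TIME_MAX_GPUS) {
    ++allocation_counts[device_index];
  }
}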