Hide torch_python symbols (#142214)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/142214
Approved by: https://github.com/ezyang
This commit is contained in:
parent dcb128d495
commit da76e912a4
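What this change does: torch_python was previously compiled with -fvisibility=default, so every symbol in libtorch_python was exported. The diff below drops that override, letting the library fall back to hidden visibility, and explicitly re-exports the declarations that other code still links against by tagging them with TORCH_PYTHON_API from torch/csrc/Export.h. A minimal sketch of how such an export macro typically works (illustrative only; MYLIB_API and MYLIB_BUILD_MAIN_LIB are hypothetical names, not the actual contents of torch/csrc/Export.h):

// export_macro.h -- illustrative sketch of a symbol-export macro.
// When the shared library itself is being built, the macro marks a symbol
// for export; consumers of the header get the import/default form.
#pragma once

#if defined(_WIN32)
#  if defined(MYLIB_BUILD_MAIN_LIB)           // hypothetical build-time define
#    define MYLIB_API __declspec(dllexport)
#  else
#    define MYLIB_API __declspec(dllimport)
#  endif
#else
// With -fvisibility=hidden as the baseline, only annotated symbols remain
// visible outside the shared object.
#  define MYLIB_API __attribute__((__visibility__("default")))
#endif

// Usage mirrors the pattern the diff applies to THPLayoutType and friends:
//   MYLIB_API extern PyTypeObject SomeGlobalType;
//   MYLIB_API void some_exported_function();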
torch/CMakeLists.txt
@@ -310,10 +310,6 @@ endif()
 add_library(torch_python SHARED ${TORCH_PYTHON_SRCS})
 torch_compile_options(torch_python) # see cmake/public/utils.cmake
-if(NOT WIN32)
-  target_compile_options(torch_python PRIVATE
-      $<$<COMPILE_LANGUAGE:CXX>: -fvisibility=default>)
-endif()
 
 if(CAFFE2_USE_MKL AND BUILD_LIBTORCHLESS)
torch/csrc/Layout.h
@@ -1,5 +1,5 @@
 #pragma once
 
+#include <torch/csrc/Export.h>
 #include <torch/csrc/python_headers.h>
 
 #include <ATen/Layout.h>
@@ -15,7 +15,7 @@ struct THPLayout {
   char name[LAYOUT_NAME_LEN + 1];
 };
 
-extern PyTypeObject THPLayoutType;
+TORCH_PYTHON_API extern PyTypeObject THPLayoutType;
 
 inline bool THPLayout_Check(PyObject* obj) {
   return Py_TYPE(obj) == &THPLayoutType;
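Why the annotation matters here: THPLayout_Check is an inline function, so any translation unit that includes this header references &THPLayoutType directly; if that symbol were hidden, code compiled outside libtorch_python would fail to link against it. A hedged sketch of such a consumer (the wrapper function and file name are hypothetical; only the header path and the two THPLayout names come from the diff above):

// layout_consumer.cpp -- hypothetical translation unit in a library that
// links against libtorch_python.
#include <torch/csrc/Layout.h>

// True when the Python object wraps a torch.layout value. THPLayout_Check
// is inline and dereferences the global &THPLayoutType, so that symbol must
// stay visible across the shared-library boundary.
bool is_torch_layout(PyObject* obj) {
  return THPLayout_Check(obj);
}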
torch/csrc/QScheme.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <torch/csrc/Export.h>
 #include <torch/csrc/python_headers.h>
 
 #include <c10/core/QScheme.h>
@@ -15,7 +16,7 @@ struct THPQScheme {
   char name[QSCHEME_NAME_LEN + 1];
 };
 
-extern PyTypeObject THPQSchemeType;
+TORCH_PYTHON_API extern PyTypeObject THPQSchemeType;
 
 inline bool THPQScheme_Check(PyObject* obj) {
   return Py_TYPE(obj) == &THPQSchemeType;
torch/csrc/utils/device_lazy_init.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <c10/core/TensorOptions.h>
+#include <torch/csrc/Export.h>
 
 // device_lazy_init() is always compiled, even for CPU-only builds.
@@ -23,7 +24,7 @@ namespace torch::utils {
  * try to use CUDA or XPU functionality from a CPU-only build, which is not good
  * UX.
  */
-void device_lazy_init(at::DeviceType device_type);
+TORCH_PYTHON_API void device_lazy_init(at::DeviceType device_type);
 void set_requires_device_init(at::DeviceType device_type, bool value);
 
 inline void maybe_initialize_device(at::Device& device) {
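device_lazy_init follows the same pattern: it is declared in a header that out-of-library code includes, so once hidden visibility is the default it needs an explicit TORCH_PYTHON_API export to stay linkable. A minimal hedged sketch of a caller (the function below is made up for illustration; only torch::utils::device_lazy_init and the header path come from the diff):

// hypothetical caller in code that links against libtorch_python
#include <torch/csrc/utils/device_lazy_init.h>

// Ensure the lazily-initialized per-device state exists before using CUDA
// functionality that assumes initialization has already happened.
void ensure_cuda_initialized() {
  torch::utils::device_lazy_init(at::DeviceType::CUDA);
}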