Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00
Summary:
Anywhere we used #include "foo.h", we now say #include <foo.h>
Paths are adjusted to be rooted at aten/src, torch/lib, or
the repository root.
I modified CMakeLists.txt by hand to remove TH and THC from
the include paths.
I used the following script to do the canonicalization:
```
import subprocess
import re
import os.path

files = subprocess.check_output(['git', 'ls-files']).decode('utf-8').rstrip().split('\n')
for fn in files:
    if not any(fn.endswith(suff) for suff in ['.cu', '.cpp', '.in', '.h', '.hpp', '.cu', '.cuh', '.cc']):
        continue
    if not any(fn.startswith(pref) for pref in ["aten/", "torch/"]):
        continue
    with open(fn, 'r') as f:
        c = f.read()
    def fmt(p):
        return "#include <{}>".format(p)
    def repl(m):
        p = m.group(1)
        if p in ["dlfcn.h", "unistd.h", "nvrtc.h", "cuda.h", "cuda_runtime.h", "cstdint", "cudnn.h", "Python.h", "cusparse.h", "cuda_runtime_api.h", "cuda_fp16.h", "cublas_v2.h", "stdint.h", "curand_kernel.h"]:
            return fmt(p)
        if any(p.startswith(pref) for pref in ["torch/csrc", "c10/", "ATen/", "caffe2/", "TH/", "THC/", "Eigen/", "gtest/", "zdl/", "gloo/", "onnx/", "miopen/"]):
            return fmt(p)
        for root in ["aten/src", "torch/lib", ""]:
            for bad_root in [os.path.dirname(fn), "aten/src/TH", "aten/src/THC", "torch/csrc"]:
                new_p = os.path.relpath(os.path.join(bad_root, p), root)
                if not new_p.startswith("../") and (os.path.exists(os.path.join(root, new_p)) or os.path.exists(os.path.join(root, new_p + ".in"))):
                    return fmt(new_p)
        print("ERROR: ", fn, p)
        return m.group(0)
    new_c = re.sub(r'#include "([^"]+)"', repl, c)
    if new_c != c:
        print(fn)
        with open(fn, 'w') as f:
            f.write(new_c)
```
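To make the rewrite concrete, here is a minimal, self-contained sketch of the same `re.sub` mechanics on a literal string. It only performs the quote-to-angle-bracket conversion; the path re-rooting above is omitted, and the header names in the input are purely illustrative.
```
import re

src = '#include "ATen/ATen.h"\n#include <cstdint>\n'

def repl(m):
    # Angle-bracket the quoted path; the real script additionally re-roots
    # relative paths against aten/src, torch/lib, or the repository root.
    return "#include <{}>".format(m.group(1))

print(re.sub(r'#include "([^"]+)"', repl, src))
# Prints:
#   #include <ATen/ATen.h>
#   #include <cstdint>
```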
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14849
Reviewed By: dzhulgakov
Differential Revision: D13363445
Pulled By: ezyang
fbshipit-source-id: 52361f878a672785f9306c9e9ab2513128092b68
140 lines | 3.7 KiB | C++
#pragma once

#include <torch/csrc/python_headers.h>
#include <cstdint>
#include <stdexcept>
#include <complex> // std::complex<double>, used by THPUtils_unpackComplexDouble below
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/tensor_numpy.h>
#include <torch/csrc/jit/tracing_state.h>

// largest integer that can be represented consecutively in a double
const int64_t DOUBLE_INT_MAX = 9007199254740992;

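// Pack a C++ int64_t into a Python integer object. On Python 2 a PyInt is
// returned when the value fits in a C long; otherwise a PyLong is returned.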
inline PyObject* THPUtils_packInt64(int64_t value) {
#if PY_MAJOR_VERSION == 2
  if (sizeof(long) == sizeof(int64_t)) {
    return PyInt_FromLong(static_cast<long>(value));
  } else if (value <= INT32_MAX && value >= INT32_MIN) {
    return PyInt_FromLong(static_cast<long>(value));
  }
#endif
  return PyLong_FromLongLong(value);
}

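// Pack a C++ uint64_t into a Python integer object (a PyInt on Python 2 when
// the value fits in an int32, a PyLong otherwise).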
inline PyObject* THPUtils_packUInt64(uint64_t value) {
#if PY_MAJOR_VERSION == 2
  if (value <= INT32_MAX) {
    return PyInt_FromLong(static_cast<long>(value));
  }
#endif
  return PyLong_FromUnsignedLongLong(value);
}

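// Pack a double into a Python integer object; the fractional part is
// discarded by the conversion.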
inline PyObject* THPUtils_packDoubleAsInt(double value) {
#if PY_MAJOR_VERSION == 2
  if (value <= INT32_MAX && value >= INT32_MIN) {
    return PyInt_FromLong(static_cast<long>(value));
  }
#endif
  return PyLong_FromDouble(value);
}

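// Return true if obj is a Python integer (excluding bool).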
inline bool THPUtils_checkLong(PyObject* obj) {
#if PY_MAJOR_VERSION == 2
  return (PyLong_Check(obj) || PyInt_Check(obj)) && !PyBool_Check(obj);
#else
  return PyLong_Check(obj) && !PyBool_Check(obj);
#endif
}

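// Unpack a Python integer into an int64_t, throwing python_error if a Python
// exception is pending and std::runtime_error on overflow.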
inline int64_t THPUtils_unpackLong(PyObject* obj) {
  int overflow;
  long long value = PyLong_AsLongLongAndOverflow(obj, &overflow);
  if (value == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  if (overflow != 0) {
    throw std::runtime_error("Overflow when unpacking long");
  }
  return (int64_t)value;
}

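// Return true if obj can be used as an integer index: a non-bool integer, or
// any object accepted by PyNumber_Index (probed under a
// torch::jit::tracer::NoWarn guard).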
inline bool THPUtils_checkIndex(PyObject *obj) {
  if (PyBool_Check(obj)) {
    return false;
  }
  if (THPUtils_checkLong(obj)) {
    return true;
  }
  torch::jit::tracer::NoWarn no_warn_guard;
  auto index = THPObjectPtr(PyNumber_Index(obj));
  if (!index) {
    PyErr_Clear();
    return false;
  }
  return true;
}

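// Unpack an index-like object into an int64_t; non-integer objects are first
// converted through PyNumber_Index (i.e. __index__).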
inline int64_t THPUtils_unpackIndex(PyObject* obj) {
  if (!THPUtils_checkLong(obj)) {
    auto index = THPObjectPtr(PyNumber_Index(obj));
    if (index == nullptr) {
      throw python_error();
    }
    // NB: This needs to be called before `index` goes out of scope and the
    // underlying object's refcount is decremented
    return THPUtils_unpackLong(index.get());
  }
  return THPUtils_unpackLong(obj);
}

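// Return true if obj can be interpreted as a double: a Python float, a Python
// integer, or (when built with NumPy support) a NumPy scalar.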
inline bool THPUtils_checkDouble(PyObject* obj) {
  bool is_numpy_scalar;
#ifdef USE_NUMPY
  is_numpy_scalar = torch::utils::is_numpy_scalar(obj);
#else
  is_numpy_scalar = false;
#endif
#if PY_MAJOR_VERSION == 2
  return PyFloat_Check(obj) || PyLong_Check(obj) || PyInt_Check(obj) || is_numpy_scalar;
#else
  return PyFloat_Check(obj) || PyLong_Check(obj) || is_numpy_scalar;
#endif
}

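// Unpack a Python number into a double. Floats are returned directly;
// integers are range-checked against DOUBLE_INT_MAX (2^53) so that precision
// loss raises instead of silently rounding; everything else goes through
// PyFloat_AsDouble.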
inline double THPUtils_unpackDouble(PyObject* obj) {
  if (PyFloat_Check(obj)) {
    return PyFloat_AS_DOUBLE(obj);
  }
  if (PyLong_Check(obj)) {
    int overflow;
    long long value = PyLong_AsLongLongAndOverflow(obj, &overflow);
    if (overflow != 0) {
      throw std::runtime_error("Overflow when unpacking double");
    }
    if (value > DOUBLE_INT_MAX || value < -DOUBLE_INT_MAX) {
      throw std::runtime_error("Precision loss when unpacking double");
    }
    return (double)value;
  }
#if PY_MAJOR_VERSION == 2
  if (PyInt_Check(obj)) {
    return (double)PyInt_AS_LONG(obj);
  }
#endif
  double value = PyFloat_AsDouble(obj);
  if (value == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  return value;
}

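// Unpack a Python complex number (anything PyComplex_AsCComplex accepts) into
// a std::complex<double>.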
inline std::complex<double> THPUtils_unpackComplexDouble(PyObject *obj) {
  Py_complex value = PyComplex_AsCComplex(obj);
  if (value.real == -1.0 && PyErr_Occurred()) {
    throw python_error();
  }

  return std::complex<double>(value.real, value.imag);
}