NULL -> nullptr (#11047)

Summary:
How did we get so many uses of `NULL` again?

ezyang
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11047

Differential Revision: D9566799

Pulled By: goldsborough

fbshipit-source-id: 83469f352ac69aa65bdaf1a1a21f922d892e0db3
This commit is contained in:
Peter Goldsborough 2018-08-30 16:22:24 -07:00 committed by Facebook Github Bot
parent 302e9cb815
commit 7ddc6f84c4
33 changed files with 332 additions and 332 deletions

View File

@ -37,6 +37,7 @@ Checks: '
,-performance-unnecessary-value-param
,-readability-braces-around-statements
,-readability-else-after-return
,-readability-implicit-bool-conversion
,-readability-named-parameter
'
WarningsAsErrors: ''

View File

@ -39,7 +39,7 @@ static void HANDLER_NAME(int sig, siginfo_t *info, void *ctx) \
struct sigaction sa; \
sa.sa_handler = SIG_DFL; \
sa.sa_flags = 0; \
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGNAL, &sa, NULL) != 0) { \
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGNAL, &sa, nullptr) != 0) { \
_exit(EXIT_FAILURE); \
} else { \
raise(SIGNAL); \
@ -80,7 +80,7 @@ static void handler_SIGTERM(int sig, siginfo_t *info, void *ctx)
struct sigaction sa;
sa.sa_handler = SIG_DFL;
sa.sa_flags = 0;
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGTERM, &sa, NULL) != 0) {
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGTERM, &sa, nullptr) != 0) {
_exit(EXIT_FAILURE);
} else {
raise(SIGTERM);
@ -89,9 +89,9 @@ static void handler_SIGTERM(int sig, siginfo_t *info, void *ctx)
static PyObject *THPModule_setWorkerSignalHandlers(PyObject *module, PyObject *arg) {
HANDLE_TH_ERRORS
setSignalHandler(SIGBUS, &handler_SIGBUS, NULL);
setSignalHandler(SIGSEGV, &handler_SIGSEGV, NULL);
setSignalHandler(SIGTERM, &handler_SIGTERM, NULL);
setSignalHandler(SIGBUS, &handler_SIGBUS, nullptr);
setSignalHandler(SIGSEGV, &handler_SIGSEGV, nullptr);
setSignalHandler(SIGTERM, &handler_SIGTERM, nullptr);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
@ -212,9 +212,9 @@ static PyObject *THPModule_errorIfAnyWorkerFails(PyObject *module, PyObject *_ig
#endif
PyMethodDef DataLoaderMethods[] = {
{"_set_worker_signal_handlers", (PyCFunction)THPModule_setWorkerSignalHandlers, METH_NOARGS, NULL},
{"_update_worker_pids", (PyCFunction)THPModule_updateWorkerPIDs, METH_VARARGS, NULL},
{"_remove_worker_pids", (PyCFunction)THPModule_removeWorkerPIDs, METH_O, NULL},
{"_error_if_any_worker_fails", (PyCFunction)THPModule_errorIfAnyWorkerFails, METH_NOARGS, NULL},
{NULL, NULL, 0, NULL}
{"_set_worker_signal_handlers", (PyCFunction)THPModule_setWorkerSignalHandlers, METH_NOARGS, nullptr},
{"_update_worker_pids", (PyCFunction)THPModule_updateWorkerPIDs, METH_VARARGS, nullptr},
{"_remove_worker_pids", (PyCFunction)THPModule_removeWorkerPIDs, METH_O, nullptr},
{"_error_if_any_worker_fails", (PyCFunction)THPModule_errorIfAnyWorkerFails, METH_NOARGS, nullptr},
{nullptr, nullptr, 0, nullptr}
};

View File

@ -173,7 +173,7 @@ static struct PyGetSetDef THPDevice_properties[] = {
static PyMethodDef THPDevice_methods[] = {
{"__reduce__", (PyCFunction)THPDevice_reduce, METH_NOARGS, nullptr},
{NULL} /* Sentinel */
{nullptr} /* Sentinel */
};
PyTypeObject THPDeviceType = {

View File

@ -47,7 +47,7 @@ static struct PyGetSetDef THPDtype_properties[] = {
static PyMethodDef THPDtype_methods[] = {
{"__reduce__", (PyCFunction)THPDtype_reduce, METH_NOARGS, nullptr},
{NULL} /* Sentinel */
{nullptr} /* Sentinel */
};
PyObject *THPDtype_repr(THPDtype *self)

View File

@ -12,7 +12,7 @@ PyObject *THPException_FatalError;
#define ASSERT_TRUE(cond) if (!(cond)) return false
bool THPException_init(PyObject *module)
{
ASSERT_TRUE(THPException_FatalError = PyErr_NewException("torch.FatalError", NULL, NULL));
ASSERT_TRUE(THPException_FatalError = PyErr_NewException("torch.FatalError", nullptr, nullptr));
ASSERT_TRUE(PyModule_AddObject(module, "FatalError", THPException_FatalError) == 0);
return true;
}

View File

@ -29,7 +29,7 @@
return retval; \
}
#define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(NULL)
#define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr)
extern PyObject *THPException_FatalError;

View File

@ -14,7 +14,7 @@
using namespace at;
using namespace torch;
PyObject *THPGeneratorClass = NULL;
PyObject *THPGeneratorClass = nullptr;
PyObject * THPGenerator_New()
{
@ -22,9 +22,9 @@ PyObject * THPGenerator_New()
if (!args) {
PyErr_SetString(PyExc_RuntimeError, "Could not create a new generator object - "
"failed to allocate argument tuple");
return NULL;
return nullptr;
}
PyObject *result = PyObject_Call((PyObject*)THPGeneratorClass, args, NULL);
PyObject *result = PyObject_Call((PyObject*)THPGeneratorClass, args, nullptr);
Py_DECREF(args);
return result;
}
@ -52,7 +52,7 @@ static PyObject * THPGenerator_pynew(PyTypeObject *type, PyObject *args, PyObjec
HANDLE_TH_ERRORS
if ((args && PyTuple_Size(args) != 0) || kwargs) {
THPUtils_setError("torch.Generator constructor doesn't accept any arguments");
return NULL;
return nullptr;
}
THPGeneratorPtr self((THPGenerator *)type->tp_alloc(type, 0));
// having to pick a specific type rather than just a backend here is strange,
@ -120,21 +120,21 @@ static PyObject * THPGenerator_initialSeed(THPGenerator *self)
}
static PyMethodDef THPGenerator_methods[] = {
{"get_state", (PyCFunction)THPGenerator_getState, METH_NOARGS, NULL},
{"set_state", (PyCFunction)THPGenerator_setState, METH_O, NULL},
{"manual_seed", (PyCFunction)THPGenerator_manualSeed, METH_O, NULL},
{"seed", (PyCFunction)THPGenerator_seed, METH_NOARGS, NULL},
{"initial_seed", (PyCFunction)THPGenerator_initialSeed, METH_NOARGS, NULL},
{NULL}
{"get_state", (PyCFunction)THPGenerator_getState, METH_NOARGS, nullptr},
{"set_state", (PyCFunction)THPGenerator_setState, METH_O, nullptr},
{"manual_seed", (PyCFunction)THPGenerator_manualSeed, METH_O, nullptr},
{"seed", (PyCFunction)THPGenerator_seed, METH_NOARGS, nullptr},
{"initial_seed", (PyCFunction)THPGenerator_initialSeed, METH_NOARGS, nullptr},
{nullptr}
};
static struct PyMemberDef THPGenerator_members[] = {
{(char*)"_cdata", T_ULONGLONG, offsetof(THPGenerator, cdata), READONLY, NULL},
{NULL}
{(char*)"_cdata", T_ULONGLONG, offsetof(THPGenerator, cdata), READONLY, nullptr},
{nullptr}
};
PyTypeObject THPGeneratorType = {
PyVarObject_HEAD_INIT(NULL, 0)
PyVarObject_HEAD_INIT(nullptr, 0)
"torch._C.Generator", /* tp_name */
sizeof(THPGenerator), /* tp_basicsize */
0, /* tp_itemsize */
@ -154,7 +154,7 @@ PyTypeObject THPGeneratorType = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
NULL, /* tp_doc */
nullptr, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */

View File

@ -53,7 +53,7 @@ namespace py = pybind11;
PyObject* module;
THPGenerator *THPDefaultGenerator = NULL;
THPGenerator *THPDefaultGenerator = nullptr;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
@ -63,7 +63,7 @@ static PyObject * THPModule_initNames(PyObject *self, PyObject *arg)
static std::vector<std::string> names;
THPObjectPtr types(PySequence_Fast(arg, "expected a sequence"));
if (!types) return NULL;
if (!types) return nullptr;
int num_classes = PySequence_Fast_GET_SIZE(types.get());
names.reserve(names.size() + num_classes);
@ -73,7 +73,7 @@ static PyObject * THPModule_initNames(PyObject *self, PyObject *arg)
PyTypeObject* type = (PyTypeObject*)obj;
THPObjectPtr module_name(PyObject_GetAttrString(obj, "__module__"));
if (!module_name) return NULL;
if (!module_name) return nullptr;
THPUtils_assert(THPUtils_checkString(module_name.get()),
"expected __module__ to be a string");
std::string name = THPUtils_unpackString(module_name.get());
@ -89,7 +89,7 @@ static PyObject * THPModule_initExtension(PyObject *_unused, PyObject *shm_manag
HANDLE_TH_ERRORS
if (!THPUtils_checkString(shm_manager_path)) {
THPUtils_setError("initialization error - expected bytes/string object as shm_manager_path!");
return NULL;
return nullptr;
}
torch::utils::initializeLayouts();
torch::utils::initializeDtypes();
@ -172,8 +172,8 @@ PyObject * THPModule_setDefaultDtype(PyObject *_unused, PyObject *dtype)
PyObject *THPModule_safeCall(PyObject *_unused, PyObject *args, PyObject *kwargs)
{
PyObject *result = NULL;
PyObject *args_slice = NULL;
PyObject *result = nullptr;
PyObject *args_slice = nullptr;
PyThreadState *thread_state = PyThreadState_Get();
Py_ssize_t num_args = args ? PyTuple_Size(args) : 0;
THPUtils_assert(num_args > 0, "expected at least one argument");
@ -197,7 +197,7 @@ PyObject *THPModule_addDocStr(PyObject *_unused, PyObject *args)
PyObject *obj;
PyObject *doc_obj;
if (!PyArg_ParseTuple(args, "OO", &obj, &doc_obj)) {
return NULL;
return nullptr;
}
const char* doc_str = "<invalid string>";
@ -403,36 +403,36 @@ PyObject *THPModule_isDefaultTypeCuda(PyObject *_unused, PyObject *arg) {
}
static PyMethodDef TorchMethods[] = {
{"_initExtension", (PyCFunction)THPModule_initExtension, METH_O, NULL},
{"_autograd_init", (PyCFunction)THPAutograd_initExtension, METH_NOARGS, NULL},
{"_add_docstr", (PyCFunction)THPModule_addDocStr, METH_VARARGS, NULL},
{"_init_names", (PyCFunction)THPModule_initNames, METH_O, NULL},
{"_has_distributed",(PyCFunction)THPModule_hasDistributed, METH_NOARGS, NULL},
{"_safe_call", (PyCFunction)THPModule_safeCall, METH_VARARGS | METH_KEYWORDS, NULL},
{"_set_default_tensor_type", (PyCFunction)THPModule_setDefaultTensorType, METH_O, NULL},
{"_set_default_dtype", (PyCFunction)THPModule_setDefaultDtype, METH_O, NULL},
{"_infer_size", (PyCFunction)THPModule_inferSize, METH_VARARGS, NULL},
{"_crash_if_csrc_asan", (PyCFunction)THPModule_crashIfCsrcASAN, METH_O, NULL},
{"_crash_if_csrc_ubsan", (PyCFunction)THPModule_crashIfCsrcUBSAN, METH_O, NULL},
{"_crash_if_aten_asan", (PyCFunction)THPModule_crashIfATenASAN, METH_O, NULL},
{"_set_backcompat_broadcast_warn", (PyCFunction)THPModule_setBackcompatBroadcastWarn, METH_O, NULL},
{"_get_backcompat_broadcast_warn", (PyCFunction)THPModule_getBackcompatBroadcastWarn, METH_NOARGS, NULL},
{"_set_backcompat_keepdim_warn", (PyCFunction)THPModule_setBackcompatKeepdimWarn, METH_O, NULL},
{"_get_backcompat_keepdim_warn", (PyCFunction)THPModule_getBackcompatKeepdimWarn, METH_NOARGS, NULL},
{"get_num_threads", (PyCFunction)THPModule_getNumThreads, METH_NOARGS, NULL},
{"set_num_threads", (PyCFunction)THPModule_setNumThreads, METH_O, NULL},
{"_get_cudnn_enabled", (PyCFunction)THPModule_userEnabledCuDNN, METH_NOARGS, NULL},
{"_set_cudnn_enabled", (PyCFunction)THPModule_setUserEnabledCuDNN, METH_O, NULL},
{"_get_cudnn_benchmark", (PyCFunction)THPModule_benchmarkCuDNN, METH_NOARGS, NULL},
{"_set_cudnn_benchmark", (PyCFunction)THPModule_setBenchmarkCuDNN, METH_O, NULL},
{"_get_cudnn_deterministic", (PyCFunction)THPModule_deterministicCuDNN, METH_NOARGS, NULL},
{"_set_cudnn_deterministic", (PyCFunction)THPModule_setDeterministicCuDNN, METH_O, NULL},
{"_to_dlpack", (PyCFunction)THPModule_toDLPack, METH_O, NULL},
{"_from_dlpack", (PyCFunction)THPModule_fromDLPack, METH_O, NULL},
{"set_flush_denormal", (PyCFunction)THPModule_setFlushDenormal, METH_O, NULL},
{"get_default_dtype", (PyCFunction)THPModule_getDefaultDtype, METH_NOARGS, NULL},
{"_is_default_type_cuda", (PyCFunction)THPModule_isDefaultTypeCuda, METH_NOARGS, NULL},
{NULL, NULL, 0, NULL}
{"_initExtension", (PyCFunction)THPModule_initExtension, METH_O, nullptr},
{"_autograd_init", (PyCFunction)THPAutograd_initExtension, METH_NOARGS, nullptr},
{"_add_docstr", (PyCFunction)THPModule_addDocStr, METH_VARARGS, nullptr},
{"_init_names", (PyCFunction)THPModule_initNames, METH_O, nullptr},
{"_has_distributed",(PyCFunction)THPModule_hasDistributed, METH_NOARGS, nullptr},
{"_safe_call", (PyCFunction)THPModule_safeCall, METH_VARARGS | METH_KEYWORDS, nullptr},
{"_set_default_tensor_type", (PyCFunction)THPModule_setDefaultTensorType, METH_O, nullptr},
{"_set_default_dtype", (PyCFunction)THPModule_setDefaultDtype, METH_O, nullptr},
{"_infer_size", (PyCFunction)THPModule_inferSize, METH_VARARGS, nullptr},
{"_crash_if_csrc_asan", (PyCFunction)THPModule_crashIfCsrcASAN, METH_O, nullptr},
{"_crash_if_csrc_ubsan", (PyCFunction)THPModule_crashIfCsrcUBSAN, METH_O, nullptr},
{"_crash_if_aten_asan", (PyCFunction)THPModule_crashIfATenASAN, METH_O, nullptr},
{"_set_backcompat_broadcast_warn", (PyCFunction)THPModule_setBackcompatBroadcastWarn, METH_O, nullptr},
{"_get_backcompat_broadcast_warn", (PyCFunction)THPModule_getBackcompatBroadcastWarn, METH_NOARGS, nullptr},
{"_set_backcompat_keepdim_warn", (PyCFunction)THPModule_setBackcompatKeepdimWarn, METH_O, nullptr},
{"_get_backcompat_keepdim_warn", (PyCFunction)THPModule_getBackcompatKeepdimWarn, METH_NOARGS, nullptr},
{"get_num_threads", (PyCFunction)THPModule_getNumThreads, METH_NOARGS, nullptr},
{"set_num_threads", (PyCFunction)THPModule_setNumThreads, METH_O, nullptr},
{"_get_cudnn_enabled", (PyCFunction)THPModule_userEnabledCuDNN, METH_NOARGS, nullptr},
{"_set_cudnn_enabled", (PyCFunction)THPModule_setUserEnabledCuDNN, METH_O, nullptr},
{"_get_cudnn_benchmark", (PyCFunction)THPModule_benchmarkCuDNN, METH_NOARGS, nullptr},
{"_set_cudnn_benchmark", (PyCFunction)THPModule_setBenchmarkCuDNN, METH_O, nullptr},
{"_get_cudnn_deterministic", (PyCFunction)THPModule_deterministicCuDNN, METH_NOARGS, nullptr},
{"_set_cudnn_deterministic", (PyCFunction)THPModule_setDeterministicCuDNN, METH_O, nullptr},
{"_to_dlpack", (PyCFunction)THPModule_toDLPack, METH_O, nullptr},
{"_from_dlpack", (PyCFunction)THPModule_fromDLPack, METH_O, nullptr},
{"set_flush_denormal", (PyCFunction)THPModule_setFlushDenormal, METH_O, nullptr},
{"get_default_dtype", (PyCFunction)THPModule_getDefaultDtype, METH_NOARGS, nullptr},
{"_is_default_type_cuda", (PyCFunction)THPModule_isDefaultTypeCuda, METH_NOARGS, nullptr},
{nullptr, nullptr, 0, nullptr}
};
bool THCPDoubleStorage_init(PyObject *module);
@ -487,8 +487,8 @@ static PyObject * THCUDNN_cudnn_version(PyObject *self, PyObject *args)
}
static PyMethodDef _THCUDNN_methods[] = {
{"_cudnn_version", (PyCFunction)THCUDNN_cudnn_version, METH_VARARGS, NULL},
{NULL}
{"_cudnn_version", (PyCFunction)THCUDNN_cudnn_version, METH_VARARGS, nullptr},
{nullptr}
};
PyMethodDef* THCUDNN_methods() {
@ -508,7 +508,7 @@ static PyObject* initModule() {
HANDLE_TH_ERRORS
THInferNumThreads();
#define ASSERT_TRUE(cmd) if (!(cmd)) return NULL
#define ASSERT_TRUE(cmd) if (!(cmd)) return nullptr
THPUtils_addPyMethodDefs(methods, TorchMethods);
THPUtils_addPyMethodDefs(methods, DataLoaderMethods);
@ -532,7 +532,7 @@ static PyObject* initModule() {
static struct PyModuleDef torchmodule = {
PyModuleDef_HEAD_INIT,
"torch._C",
NULL,
nullptr,
-1,
methods.data()
};
@ -634,7 +634,7 @@ static PyObject* initModule() {
ASSERT_TRUE(set_module_attr("default_generator", (PyObject*)THPDefaultGenerator, /* incref= */ false));
#ifdef USE_NUMPY
if (_import_array() < 0) return NULL;
if (_import_array() < 0) return nullptr;
#endif
torch::nn::init__THNN(module);

View File

@ -2,7 +2,7 @@
#include "ATen/Utils.h"
#include <functional>
static PyObject* THPWrapperClass = NULL;
static PyObject* THPWrapperClass = nullptr;
struct THPWrapper {
PyObject_HEAD
@ -14,9 +14,9 @@ PyObject * THPWrapper_New(void *data, void (*destructor)(void*))
{
PyObject *args = PyTuple_New(0);
if (!args) {
return NULL;
return nullptr;
}
PyObject *result = PyObject_Call(THPWrapperClass, args, NULL);
PyObject *result = PyObject_Call(THPWrapperClass, args, nullptr);
if (result) {
THPWrapper* wrapper = (THPWrapper*) result;
wrapper->data = data;
@ -40,8 +40,8 @@ static PyObject * THPWrapper_pynew(PyTypeObject *type, PyObject *args, PyObject
{
PyObject* self = type->tp_alloc(type, 0);
THPWrapper* wrapper = (THPWrapper*) self;
wrapper->data = NULL;
wrapper->destructor = NULL;
wrapper->data = nullptr;
wrapper->destructor = nullptr;
return self;
}
@ -52,7 +52,7 @@ static void THPWrapper_dealloc(THPWrapper* self)
}
PyTypeObject THPWrapperType = {
PyVarObject_HEAD_INIT(NULL, 0)
PyVarObject_HEAD_INIT(nullptr, 0)
"torch._C._PtrWrapper", /* tp_name */
sizeof(THPWrapper), /* tp_basicsize */
0, /* tp_itemsize */
@ -72,7 +72,7 @@ PyTypeObject THPWrapperType = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
NULL, /* tp_doc */
nullptr, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */

View File

@ -19,18 +19,18 @@ important gotchas:
## Notes
### Note [Storage is not NULL]
### Note [Storage is not nullptr]
Historically, Torch supported NULL storage, as a minor optimization to
Historically, Torch supported nullptr storage, as a minor optimization to
avoid having to allocate a storage object when it would be empty.
However, this is actually a confusing special case to deal with, so
by-in-large, PyTorch assumes that, in fact, storage is never NULL.
by and large, PyTorch assumes that, in fact, storage is never nullptr.
One important case where this assumption is important is when tracking
the CUDA device a tensor is stored in: this information is stored
solely in the storage, so if a storage is NULL, we lose this information.
solely in the storage, so if a storage is nullptr, we lose this information.
Although storage is never NULL, the data field of THStorage may be NULL. This
Although storage is never nullptr, the data field of THStorage may be nullptr. This
mostly occurs when we want to pre-allocate an output tensor struct, but then
have it be resized and filled with data by some operator: there's no point in
allocating data for it in this case!

View File

@ -95,9 +95,9 @@ template<typename FnType, FnType fn, typename ...Args>
static PyObject* wrap_tuple_fn(Args ... args)
{
THPObjectPtr result((*fn)(std::forward<Args>(args)...));
if (!result) return NULL;
if (!result) return nullptr;
if (PyTuple_Check(result.get())) {
return PyObject_CallFunctionObjArgs((PyObject*)&THPSizeType, result.get(), NULL);
return PyObject_CallFunctionObjArgs((PyObject*)&THPSizeType, result.get(), nullptr);
}
return result.release();
}
@ -137,7 +137,7 @@ static PyMappingMethods THPSize_as_mapping = {
PyTypeObject THPSizeType = {
PyVarObject_HEAD_INIT(NULL, 0)
PyVarObject_HEAD_INIT(nullptr, 0)
"torch.Size", /* tp_name */
sizeof(THPSize), /* tp_basicsize */
0, /* tp_itemsize */
@ -157,7 +157,7 @@ PyTypeObject THPSizeType = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
NULL, /* tp_doc */
nullptr, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */

View File

@ -417,8 +417,8 @@ static PyMappingMethods THPVariable_as_mapping = {
};
static PyMethodDef extra_methods[] = {
{"_make_subclass", (PyCFunction)THPVariable_make_subclass, METH_STATIC | METH_VARARGS | METH_KEYWORDS, NULL},
{NULL}
{"_make_subclass", (PyCFunction)THPVariable_make_subclass, METH_STATIC | METH_VARARGS | METH_KEYWORDS, nullptr},
{nullptr}
};
PyTypeObject THPVariableType = {

View File

@ -40,14 +40,14 @@ inline PyObject * THPStorageCopyMethod(const THPCopyList& v, PyObject *self, PyO
{
PyObject *src;
int non_blocking = 0;
static char *kwlist[] = {"source", "non_blocking", NULL};
static char *kwlist[] = {"source", "non_blocking", nullptr};
// use int as parse type because bool not available in python2.
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|i:copy_", kwlist, &src, &non_blocking)) {
return NULL;
return nullptr;
}
if (!THPCopy(v, self, src, non_blocking, false)) {
return NULL;
return nullptr;
}
Py_INCREF(self);
@ -65,7 +65,7 @@ void THPInsertStorageCopyFunction(
auto dst = ((THPStorageDst*)dst_)->cdata;
auto src = ((THPStorageSrc*)src_)->cdata;
PyThreadState *_save = NULL;
PyThreadState *_save = nullptr;
try {
Py_UNBLOCK_THREADS;
copyFunc(LIBRARY_STATE dst, src);

View File

@ -103,7 +103,7 @@ PyObject * THCPModule_getDriverVersion(PyObject *self)
PyErr_Format(PyExc_RuntimeError,
"Error calling cudaDriverGetVersion: %d %s",
err, cudaGetErrorString(err));
return NULL;
return nullptr;
}
return PyLong_FromLong((int64_t) driverVersion);
}
@ -380,44 +380,44 @@ PyObject * THCPModule_getCurrentBlasHandle_wrap(PyObject *self)
}
static struct PyMethodDef _THCPModule_methods[] = {
{"_cuda_init", (PyCFunction)THCPModule_initExtension, METH_NOARGS, NULL},
{"_cuda_setDevice", (PyCFunction)THCPModule_setDevice_wrap, METH_O, NULL},
{"_cuda_getDevice", (PyCFunction)THCPModule_getDevice_wrap, METH_NOARGS, NULL},
{"_cuda_getDeviceCount", (PyCFunction)THCPModule_getDeviceCount_wrap, METH_NOARGS, NULL},
{"_cuda_getCurrentStream", (PyCFunction)THCPModule_getCurrentStream_wrap, METH_NOARGS, NULL},
{"_cuda_getCurrentBlasHandle", (PyCFunction)THCPModule_getCurrentBlasHandle_wrap, METH_NOARGS, NULL},
{"_cuda_setStream", (PyCFunction)THCPModule_setStream_wrap, METH_O, NULL},
{"_cuda_isDriverSufficient", (PyCFunction)THCPModule_isDriverSufficient, METH_NOARGS, NULL},
{"_cuda_getDriverVersion", (PyCFunction)THCPModule_getDriverVersion, METH_NOARGS, NULL},
{"_cuda_getCompiledVersion", (PyCFunction)THCPModule_getCompiledVersion, METH_NOARGS, NULL},
{"_cuda_getRNGState", (PyCFunction)THCPModule_getRNGState, METH_NOARGS, NULL},
{"_cuda_setRNGState", (PyCFunction)THCPModule_setRNGState, METH_O, NULL},
{"_cuda_emptyCache", (PyCFunction) THCPModule_emptyCache, METH_NOARGS, NULL},
{"_cuda_memoryAllocated", (PyCFunction) THCPModule_memoryAllocated, METH_O, NULL},
{"_cuda_maxMemoryAllocated", (PyCFunction) THCPModule_maxMemoryAllocated, METH_O, NULL},
{"_cuda_memoryCached", (PyCFunction) THCPModule_memoryCached, METH_O, NULL},
{"_cuda_maxMemoryCached", (PyCFunction) THCPModule_maxMemoryCached, METH_O, NULL},
{"_cuda_manualSeed", (PyCFunction)THCPModule_manualSeed, METH_O, NULL},
{"_cuda_manualSeedAll", (PyCFunction)THCPModule_manualSeedAll, METH_O, NULL},
{"_cuda_seed", (PyCFunction)THCPModule_seed, METH_NOARGS, NULL},
{"_cuda_seedAll", (PyCFunction)THCPModule_seedAll, METH_NOARGS, NULL},
{"_cuda_initialSeed", (PyCFunction)THCPModule_initialSeed, METH_NOARGS, NULL},
{"_cuda_cudaHostAllocator", (PyCFunction)THCPModule_cudaHostAllocator, METH_NOARGS, NULL},
{"_cuda_synchronize", (PyCFunction)THCPModule_cudaSynchronize, METH_NOARGS, NULL},
{"_cuda_sleep", (PyCFunction)THCPModule_cudaSleep, METH_O, NULL},
{"_cuda_lock_mutex", (PyCFunction)THCPModule_cudaLockMutex, METH_NOARGS, NULL},
{"_cuda_unlock_mutex", (PyCFunction)THCPModule_cudaUnlockMutex, METH_NOARGS, NULL},
{"_cuda_init", (PyCFunction)THCPModule_initExtension, METH_NOARGS, nullptr},
{"_cuda_setDevice", (PyCFunction)THCPModule_setDevice_wrap, METH_O, nullptr},
{"_cuda_getDevice", (PyCFunction)THCPModule_getDevice_wrap, METH_NOARGS, nullptr},
{"_cuda_getDeviceCount", (PyCFunction)THCPModule_getDeviceCount_wrap, METH_NOARGS, nullptr},
{"_cuda_getCurrentStream", (PyCFunction)THCPModule_getCurrentStream_wrap, METH_NOARGS, nullptr},
{"_cuda_getCurrentBlasHandle", (PyCFunction)THCPModule_getCurrentBlasHandle_wrap, METH_NOARGS, nullptr},
{"_cuda_setStream", (PyCFunction)THCPModule_setStream_wrap, METH_O, nullptr},
{"_cuda_isDriverSufficient", (PyCFunction)THCPModule_isDriverSufficient, METH_NOARGS, nullptr},
{"_cuda_getDriverVersion", (PyCFunction)THCPModule_getDriverVersion, METH_NOARGS, nullptr},
{"_cuda_getCompiledVersion", (PyCFunction)THCPModule_getCompiledVersion, METH_NOARGS, nullptr},
{"_cuda_getRNGState", (PyCFunction)THCPModule_getRNGState, METH_NOARGS, nullptr},
{"_cuda_setRNGState", (PyCFunction)THCPModule_setRNGState, METH_O, nullptr},
{"_cuda_emptyCache", (PyCFunction) THCPModule_emptyCache, METH_NOARGS, nullptr},
{"_cuda_memoryAllocated", (PyCFunction) THCPModule_memoryAllocated, METH_O, nullptr},
{"_cuda_maxMemoryAllocated", (PyCFunction) THCPModule_maxMemoryAllocated, METH_O, nullptr},
{"_cuda_memoryCached", (PyCFunction) THCPModule_memoryCached, METH_O, nullptr},
{"_cuda_maxMemoryCached", (PyCFunction) THCPModule_maxMemoryCached, METH_O, nullptr},
{"_cuda_manualSeed", (PyCFunction)THCPModule_manualSeed, METH_O, nullptr},
{"_cuda_manualSeedAll", (PyCFunction)THCPModule_manualSeedAll, METH_O, nullptr},
{"_cuda_seed", (PyCFunction)THCPModule_seed, METH_NOARGS, nullptr},
{"_cuda_seedAll", (PyCFunction)THCPModule_seedAll, METH_NOARGS, nullptr},
{"_cuda_initialSeed", (PyCFunction)THCPModule_initialSeed, METH_NOARGS, nullptr},
{"_cuda_cudaHostAllocator", (PyCFunction)THCPModule_cudaHostAllocator, METH_NOARGS, nullptr},
{"_cuda_synchronize", (PyCFunction)THCPModule_cudaSynchronize, METH_NOARGS, nullptr},
{"_cuda_sleep", (PyCFunction)THCPModule_cudaSleep, METH_O, nullptr},
{"_cuda_lock_mutex", (PyCFunction)THCPModule_cudaLockMutex, METH_NOARGS, nullptr},
{"_cuda_unlock_mutex", (PyCFunction)THCPModule_cudaUnlockMutex, METH_NOARGS, nullptr},
#ifdef USE_NCCL
{"_nccl_version", (PyCFunction)THCPModule_nccl_version, METH_NOARGS, NULL},
{"_nccl_unique_id", (PyCFunction)THCPModule_nccl_unique_id, METH_NOARGS, NULL},
{"_nccl_init_rank", (PyCFunction)THCPModule_nccl_init_rank, METH_VARARGS, NULL},
{"_nccl_reduce", (PyCFunction)THCPModule_nccl_reduce, METH_VARARGS, NULL},
{"_nccl_all_reduce", (PyCFunction)THCPModule_nccl_all_reduce, METH_VARARGS, NULL},
{"_nccl_broadcast", (PyCFunction)THCPModule_nccl_broadcast, METH_VARARGS, NULL},
{"_nccl_all_gather", (PyCFunction)THCPModule_nccl_all_gather, METH_VARARGS, NULL},
{"_nccl_reduce_scatter", (PyCFunction)THCPModule_nccl_reduce_scatter, METH_VARARGS, NULL},
{"_nccl_version", (PyCFunction)THCPModule_nccl_version, METH_NOARGS, nullptr},
{"_nccl_unique_id", (PyCFunction)THCPModule_nccl_unique_id, METH_NOARGS, nullptr},
{"_nccl_init_rank", (PyCFunction)THCPModule_nccl_init_rank, METH_VARARGS, nullptr},
{"_nccl_reduce", (PyCFunction)THCPModule_nccl_reduce, METH_VARARGS, nullptr},
{"_nccl_all_reduce", (PyCFunction)THCPModule_nccl_all_reduce, METH_VARARGS, nullptr},
{"_nccl_broadcast", (PyCFunction)THCPModule_nccl_broadcast, METH_VARARGS, nullptr},
{"_nccl_all_gather", (PyCFunction)THCPModule_nccl_all_gather, METH_VARARGS, nullptr},
{"_nccl_reduce_scatter", (PyCFunction)THCPModule_nccl_reduce_scatter, METH_VARARGS, nullptr},
#endif
{NULL}
{nullptr}
};
PyMethodDef* THCPModule_methods() {

View File

@ -9,7 +9,7 @@
#include <structmember.h>
#include <cuda_runtime_api.h>
PyObject *THCPStreamClass = NULL;
PyObject *THCPStreamClass = nullptr;
static PyObject * THCPStream_pynew(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
@ -21,14 +21,14 @@ static PyObject * THCPStream_pynew(PyTypeObject *type, PyObject *args, PyObject
int priority = 0;
unsigned long long cdata = 0;
static char *kwlist[] = {"priority", "_cdata", NULL};
static char *kwlist[] = {"priority", "_cdata", nullptr};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iK", kwlist, &priority, &cdata)) {
return NULL;
return nullptr;
}
THPObjectPtr ptr(type->tp_alloc(type, 0));
if (!ptr) {
return NULL;
return nullptr;
}
THCStream* stream;
@ -42,7 +42,7 @@ static PyObject * THCPStream_pynew(PyTypeObject *type, PyObject *args, PyObject
THCPStream* self = (THCPStream *)ptr.get();
self->cdata = stream;
self->device = stream ? THCStream_device(stream) : current_device;
self->cuda_stream = stream ? THCStream_stream(stream) : NULL;
self->cuda_stream = stream ? THCStream_stream(stream) : nullptr;
return (PyObject *)ptr.release();
END_HANDLE_TH_ERRORS
}
@ -54,18 +54,18 @@ static void THCPStream_dealloc(THCPStream* self)
}
static struct PyMemberDef THCPStream_members[] = {
{(char*)"_cdata", T_ULONGLONG, offsetof(THCPStream, cdata), READONLY, NULL},
{(char*)"device", T_INT, offsetof(THCPStream, device), READONLY, NULL},
{(char*)"cuda_stream", T_ULONGLONG, offsetof(THCPStream, cuda_stream), READONLY, NULL},
{NULL}
{(char*)"_cdata", T_ULONGLONG, offsetof(THCPStream, cdata), READONLY, nullptr},
{(char*)"device", T_INT, offsetof(THCPStream, device), READONLY, nullptr},
{(char*)"cuda_stream", T_ULONGLONG, offsetof(THCPStream, cuda_stream), READONLY, nullptr},
{nullptr}
};
static PyMethodDef THCPStream_methods[] = {
{NULL}
{nullptr}
};
PyTypeObject THCPStreamType = {
PyVarObject_HEAD_INIT(NULL, 0)
PyVarObject_HEAD_INIT(nullptr, 0)
"torch._C._CudaStreamBase", /* tp_name */
sizeof(THCPStream), /* tp_basicsize */
0, /* tp_itemsize */
@ -85,7 +85,7 @@ PyTypeObject THCPStreamType = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
NULL, /* tp_doc */
nullptr, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */

View File

@ -191,7 +191,7 @@ void broadcast(TensorList tensors, const stream_list& streams, const comm_list&
for (size_t i = 0, num_tensors = tensors.size(); i < num_tensors; i++) {
device_guard.set_index(tensors[i].get_device());
// TODO: use current stream
const auto stream = (streams.empty() || !streams[i]) ? NULL : THCStream_stream(streams[i]);
const auto stream = (streams.empty() || !streams[i]) ? nullptr : THCStream_stream(streams[i]);
CHECK(ncclBcast(tensors[i].data_ptr(), numel, data_type, 0, comms[i], stream));
}
#else

View File

@ -99,7 +99,7 @@ PyObject * THCPModule_nccl_init_rank(PyObject *self, PyObject *args) {
int rank;
if (!PyArg_ParseTuple(args, "is#i:nccl_init_rank", &nranks, &id, &id_len, &rank)) {
return NULL;
return nullptr;
}
THPUtils_assert(id_len == NCCL_UNIQUE_ID_BYTES,
"invalid unqiue_id (expected %d bytes, got %zd)",
@ -121,10 +121,10 @@ PyObject * THCPModule_nccl_reduce(PyObject *self, PyObject *args) {
int root, op;
if (!PyArg_ParseTuple(args, "OOiiOO", &_inputs, &_outputs, &root, &op, &_streams, &_comms)) {
THPUtils_invalidArguments(args, NULL, "nccl_reduce", 1,
THPUtils_invalidArguments(args, nullptr, "nccl_reduce", 1,
"(sequence[Tensor] inputs, sequence[Tensor] outputs, int root,"
" int op, sequence[torch.cuda.Stream or None]");
return NULL;
return nullptr;
}
std::vector<at::Tensor> inputs = extract_tensors(_inputs);
@ -148,7 +148,7 @@ PyObject * THCPModule_nccl_reduce(PyObject *self, PyObject *args) {
for (size_t i = 0; i < len; i++) {
int device = inputs[i].get_device();
device_guard.set_index(device);
auto stream = (streams[i] == NULL) ? NULL : THCStream_stream(streams[i]);
auto stream = (streams[i] == nullptr) ? nullptr : THCStream_stream(streams[i]);
CHECK(ncclReduce(inputs[i].data_ptr(), outputs[i].data_ptr(),
count, data_type, (ncclRedOp_t) op, root, comms[i], stream));
}
@ -164,11 +164,11 @@ PyObject * THCPModule_nccl_all_reduce(PyObject *self, PyObject *args) {
int op;
if (!PyArg_ParseTuple(args, "OOiOO", &_inputs, &_outputs, &op, &_streams, &_comms)) {
THPUtils_invalidArguments(args, NULL, "nccl_all_reduce", 1,
THPUtils_invalidArguments(args, nullptr, "nccl_all_reduce", 1,
"(sequence[Tensor] inputs, sequence[Tensor] outputs, int op,"
" sequence[torch.cuda.Stream] streams,"
" sequence[torch.cuda.nccl.Communicator] comms)");
return NULL;
return nullptr;
}
std::vector<at::Tensor> inputs = extract_tensors(_inputs);
@ -190,7 +190,7 @@ PyObject * THCPModule_nccl_all_reduce(PyObject *self, PyObject *args) {
for (size_t i = 0; i < len; i++) {
int device = inputs[i].get_device();
device_guard.set_index(device);
auto stream = (streams[i] == NULL) ? NULL : THCStream_stream(streams[i]);
auto stream = (streams[i] == nullptr) ? nullptr : THCStream_stream(streams[i]);
CHECK(ncclAllReduce(inputs[i].data_ptr(), outputs[i].data_ptr(),
count, data_type, (ncclRedOp_t) op, comms[i], stream));
}
@ -206,9 +206,9 @@ PyObject * THCPModule_nccl_broadcast(PyObject *self, PyObject *args) {
int root;
if (!PyArg_ParseTuple(args, "OiOO", &_inputs, &root, &_streams, &_comms)) {
THPUtils_invalidArguments(args, NULL, "nccl_broadcast", 1,
THPUtils_invalidArguments(args, nullptr, "nccl_broadcast", 1,
"(sequence[Tensor] inputs, int root)");
return NULL;
return nullptr;
}
std::vector<at::Tensor> inputs = extract_tensors(_inputs);
@ -229,9 +229,9 @@ PyObject * THCPModule_nccl_all_gather(PyObject *self, PyObject *args) {
PyObject *_inputs, *_outputs, *_streams, *_comms;
if (!PyArg_ParseTuple(args, "OOOO", &_inputs, &_outputs, &_streams, &_comms)) {
THPUtils_invalidArguments(args, NULL, "nccl_all_gather", 1,
THPUtils_invalidArguments(args, nullptr, "nccl_all_gather", 1,
"(sequence[Tensor] inputs, sequence[Tensor] outputs");
return NULL;
return nullptr;
}
std::vector<at::Tensor> inputs = extract_tensors(_inputs);
@ -253,7 +253,7 @@ PyObject * THCPModule_nccl_all_gather(PyObject *self, PyObject *args) {
for (size_t i = 0; i < len; i++) {
int device = inputs[i].get_device();
device_guard.set_index(device);
auto stream = (streams[i] == NULL) ? NULL : THCStream_stream(streams[i]);
auto stream = (streams[i] == nullptr) ? nullptr : THCStream_stream(streams[i]);
#if defined(NCCL_MAJOR) && (NCCL_MAJOR >= 2)
CHECK(ncclAllGather(inputs[i].data_ptr(), outputs[i].data_ptr(),
count, data_type, comms[i], stream));
@ -274,9 +274,9 @@ PyObject * THCPModule_nccl_reduce_scatter(PyObject *self, PyObject *args) {
int op;
if (!PyArg_ParseTuple(args, "OOiOO", &_inputs, &_outputs, &op, &_streams, &_comms)) {
THPUtils_invalidArguments(args, NULL, "nccl_reduce_scatter", 1,
THPUtils_invalidArguments(args, nullptr, "nccl_reduce_scatter", 1,
"(sequence[Tensor] inputs, sequence[Tensor] outputs, int op");
return NULL;
return nullptr;
}
std::vector<at::Tensor> inputs = extract_tensors(_inputs);
@ -298,7 +298,7 @@ PyObject * THCPModule_nccl_reduce_scatter(PyObject *self, PyObject *args) {
for (size_t i = 0; i < len; i++) {
int device = inputs[i].get_device();
device_guard.set_index(device);
auto stream = (streams[i] == NULL) ? NULL : THCStream_stream(streams[i]);
auto stream = (streams[i] == nullptr) ? nullptr : THCStream_stream(streams[i]);
CHECK(ncclReduceScatter(inputs[i].data_ptr(), outputs[i].data_ptr(),
count, data_type, (ncclRedOp_t) op, comms[i], stream));
}

View File

@ -13,8 +13,8 @@ std::vector <THCStream*> THPUtils_PySequence_to_THCStreamList(PyObject *obj) {
if (!PySequence_Check(obj)) {
throw std::runtime_error("Expected a sequence in THPUtils_PySequence_to_THCStreamList");
}
THPObjectPtr seq = THPObjectPtr(PySequence_Fast(obj, NULL));
if (seq.get() == NULL) {
THPObjectPtr seq = THPObjectPtr(PySequence_Fast(obj, nullptr));
if (seq.get() == nullptr) {
throw std::runtime_error("expected PySequence, but got " + std::string(THPUtils_typename(obj)));
}
@ -26,7 +26,7 @@ std::vector <THCStream*> THPUtils_PySequence_to_THCStreamList(PyObject *obj) {
if (PyObject_IsInstance(stream, THCPStreamClass)) {
streams.push_back( ((THCPStream *)stream)->cdata);
} else if (stream == Py_None) {
streams.push_back(NULL);
streams.push_back(nullptr);
} else {
std::runtime_error("Unknown data type found in stream list. Need THCStream or None");
}

View File

@ -36,8 +36,8 @@ PyObject* THDPModule_initProcessGroup(PyObject *_unused, PyObject *args)
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 2)) ||
!THPUtils_checkString(PyTuple_GET_ITEM(args, 3)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 4))) {
THPUtils_invalidArguments(args, NULL, "init_process_group", 1, "(string backend, string init_method, int world_size, string group_name, int rank)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "init_process_group", 1, "(string backend, string init_method, int world_size, string group_name, int rank)");
return nullptr;
}
std::string backend_name = THPUtils_unpackString(PyTuple_GET_ITEM(args, 0));
@ -141,8 +141,8 @@ static THDGroup _getGroup(PyObject *obj)
PyObject* THDPModule_clearGroupCache(PyObject *_unused, PyObject *args) {
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 1) {
THPUtils_invalidArguments(args, NULL, "clear_group_cache", 1, "(group gr)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "clear_group_cache", 1, "(group gr)");
return nullptr;
}
THDGroup group = _getGroup(PyTuple_GET_ITEM(args, 0));
@ -160,8 +160,8 @@ PyObject* THDPModule_isend(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 2 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "isend", 1, "(tensor input, int dst_rank)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "isend", 1, "(tensor input, int dst_rank)");
return nullptr;
}
auto desc = THDPModule_makeDescriptor(PyTuple_GET_ITEM(args, 0));
@ -180,8 +180,8 @@ PyObject* THDPModule_irecv(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 2 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "irecv", 1, "(tensor output, int src_rank)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "irecv", 1, "(tensor output, int src_rank)");
return nullptr;
}
auto desc = THDPModule_makeDescriptor(PyTuple_GET_ITEM(args, 0));
@ -200,8 +200,8 @@ PyObject* THDPModule_send(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 2 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "send", 1, "(tensor input, int dst_rank)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "send", 1, "(tensor input, int dst_rank)");
return nullptr;
}
auto desc = THDPModule_makeDescriptor(PyTuple_GET_ITEM(args, 0));
@ -218,8 +218,8 @@ PyObject* THDPModule_recvAnySource(PyObject *_unused, PyObject *_tensor)
{
HANDLE_TH_ERRORS
if (!THPVariable_Check(_tensor)) {
THPUtils_invalidArguments(_tensor, NULL, "recv", 1, "(tensor output)");
return NULL;
THPUtils_invalidArguments(_tensor, nullptr, "recv", 1, "(tensor output)");
return nullptr;
}
auto desc = THDPModule_makeDescriptor(_tensor);
@ -237,8 +237,8 @@ PyObject* THDPModule_recv(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 2 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "recv", 1, "(tensor output, int src_rank)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "recv", 1, "(tensor output, int src_rank)");
return nullptr;
}
auto desc = THDPModule_makeDescriptor(PyTuple_GET_ITEM(args, 0));
@ -301,7 +301,7 @@ PyObject* THDPModule_allReduceMultiGPU(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "all_reduce_multigpu", 1,
THPUtils_invalidArguments(args, nullptr, "all_reduce_multigpu", 1,
"(list[tensor] in_out, reduce_op op, group gr)");
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
@ -358,7 +358,7 @@ PyObject* THDPModule_reduceMultiGPU(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "reduce_multigpu", 1,
THPUtils_invalidArguments(args, nullptr, "reduce_multigpu", 1,
"(list[tensor] in_out, int dst_rank, "
"reduce_op op, group gr)");
Py_RETURN_NONE;
@ -414,7 +414,7 @@ PyObject* THDPModule_broadcastMultiGPU(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "broadcast_multigpu", 1,
THPUtils_invalidArguments(args, nullptr, "broadcast_multigpu", 1,
"(list[tensor] in_out, int src_rank, group gr)");
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
@ -496,7 +496,7 @@ PyObject* THDPModule_allGatherMultiGPU(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "all_gather_multigpu", 1,
THPUtils_invalidArguments(args, nullptr, "all_gather_multigpu", 1,
"(list[list[tensor]] output, list[tensor] input, group gr)");
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
@ -507,8 +507,8 @@ PyObject* THDPModule_allReduce(PyObject *_unused, PyObject *args)
{
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 3 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0))) {
THPUtils_invalidArguments(args, NULL, "all_reduce", 1, "(tensor in_out, reduce_op op, group gr)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "all_reduce", 1, "(tensor in_out, reduce_op op, group gr)");
return nullptr;
}
THDGroup group = _getGroup(PyTuple_GET_ITEM(args, 2));
@ -527,9 +527,9 @@ PyObject* THDPModule_reduce(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 4 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "reduce", 1,
THPUtils_invalidArguments(args, nullptr, "reduce", 1,
"(tensor reduced, int dst_rank, reduce_op op, group gr)");
return NULL;
return nullptr;
}
THDGroup group = _getGroup(PyTuple_GET_ITEM(args, 3));
@ -549,9 +549,9 @@ PyObject* THDPModule_broadcast(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 3 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "broadcast", 1,
THPUtils_invalidArguments(args, nullptr, "broadcast", 1,
"(tensor src_dst, int src_rank, group gr)");
return NULL;
return nullptr;
}
THDGroup group = _getGroup(PyTuple_GET_ITEM(args, 2));
@ -609,7 +609,7 @@ PyObject* THDPModule_allGather(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "allGather", 1,
THPUtils_invalidArguments(args, nullptr, "allGather", 1,
"(list[tensor] output, tensor input, group gr)");
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
@ -619,9 +619,9 @@ PyObject* THDPModule_gatherSend(PyObject *_unused, PyObject *args)
{
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 3 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0))) {
THPUtils_invalidArguments(args, NULL, "gatherSend", 1,
THPUtils_invalidArguments(args, nullptr, "gatherSend", 1,
"(tensor input, int dst_rank, group gr)");
return NULL;
return nullptr;
}
THDGroup group = _getGroup(PyTuple_GET_ITEM(args, 2));
@ -678,9 +678,9 @@ PyObject* THDPModule_gatherRecv(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "gatherRecv", 1,
THPUtils_invalidArguments(args, nullptr, "gatherRecv", 1,
"(list[tensor] output, tensor input, group gr)");
return NULL;
return nullptr;
END_HANDLE_TH_ERRORS
}
@ -727,9 +727,9 @@ PyObject* THDPModule_scatterSend(PyObject *_unused, PyObject *args)
Py_RETURN_NONE;
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "scatterSend", 1,
THPUtils_invalidArguments(args, nullptr, "scatterSend", 1,
"(list[tensor] input, tensor output, group gr)");
return NULL;
return nullptr;
END_HANDLE_TH_ERRORS
}
@ -738,9 +738,9 @@ PyObject* THDPModule_scatterRecv(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
if (PyTuple_GET_SIZE(args) != 3 || !THPVariable_Check(PyTuple_GET_ITEM(args, 0)) ||
!THPUtils_checkLong(PyTuple_GET_ITEM(args, 1))) {
THPUtils_invalidArguments(args, NULL, "scatterRecv", 1,
THPUtils_invalidArguments(args, nullptr, "scatterRecv", 1,
"(tensor output, int src_rank, group gr)");
return NULL;
return nullptr;
}
THDGroup group = _getGroup(PyTuple_GET_ITEM(args, 2));
@ -806,8 +806,8 @@ PyObject* THDPModule_newGroup(PyObject *_unused, PyObject *args)
return PyInt_FromLong(group);
invalid_arguments:
THPUtils_invalidArguments(args, NULL, "newGroup", 1, "(list[int] ranks)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "newGroup", 1, "(list[int] ranks)");
return nullptr;
END_HANDLE_TH_ERRORS
}
@ -815,8 +815,8 @@ PyObject* THDPModule_requestIsCompleted(PyObject *_unused, PyObject *_req)
{
HANDLE_TH_ERRORS
if (!THPWrapper_check(_req)) {
THPUtils_invalidArguments(_req, NULL, "requestIsCompleted", 1, "(request req)");
return NULL;
THPUtils_invalidArguments(_req, nullptr, "requestIsCompleted", 1, "(request req)");
return nullptr;
}
return PyBool_FromLong(THDRequest_isCompleted(_unpackRequest(_req)));
@ -827,8 +827,8 @@ PyObject* THDPModule_requestWait(PyObject *_unused, PyObject *_req)
{
HANDLE_TH_ERRORS
if (!THPWrapper_check(_req)) {
THPUtils_invalidArguments(_req, NULL, "requestWait", 1, "(request req)");
return NULL;
THPUtils_invalidArguments(_req, nullptr, "requestWait", 1, "(request req)");
return nullptr;
}
{
@ -841,8 +841,8 @@ PyObject* THDPModule_requestWait(PyObject *_unused, PyObject *_req)
PyObject* THDPModule_initExtension(PyObject *_unused, PyObject *args) {
if (PyTuple_GET_SIZE(args) != 3) {
THPUtils_invalidArguments(args, NULL, "initExtension", 1, "(bool is_master_worker, reduce_op obj, group obj)");
return NULL;
THPUtils_invalidArguments(args, nullptr, "initExtension", 1, "(bool is_master_worker, reduce_op obj, group obj)");
return nullptr;
}
PyObject* is_master_worker_obj = PyTuple_GET_ITEM(args, 0);
@ -878,37 +878,37 @@ PyObject* THDPModule_initExtension(PyObject *_unused, PyObject *args) {
}
static struct PyMethodDef _THDPModule_methods[] = {
{"_dist_init_extension", (PyCFunction)THDPModule_initExtension, METH_VARARGS, NULL},
{"_dist_init_process_group", (PyCFunction)THDPModule_initProcessGroup, METH_VARARGS, NULL},
{"_dist_destroy_process_group", (PyCFunction)THDPModule_destroyProcessGroup, METH_NOARGS, NULL},
{"_dist_clear_group_cache", (PyCFunction)THDPModule_clearGroupCache, METH_VARARGS, NULL},
{"_dist_init_extension", (PyCFunction)THDPModule_initExtension, METH_VARARGS, nullptr},
{"_dist_init_process_group", (PyCFunction)THDPModule_initProcessGroup, METH_VARARGS, nullptr},
{"_dist_destroy_process_group", (PyCFunction)THDPModule_destroyProcessGroup, METH_NOARGS, nullptr},
{"_dist_clear_group_cache", (PyCFunction)THDPModule_clearGroupCache, METH_VARARGS, nullptr},
#ifdef USE_CUDA
{"_dist_register_stream", (PyCFunction)THDPModule_registerStream, METH_O, NULL},
{"_dist_register_stream", (PyCFunction)THDPModule_registerStream, METH_O, nullptr},
#endif
{"_dist_get_rank", (PyCFunction)THDPModule_getRank, METH_NOARGS, NULL},
{"_dist_get_num_processes", (PyCFunction)THDPModule_getNumProcesses, METH_NOARGS, NULL},
{"_dist_isend", (PyCFunction)THDPModule_isend, METH_VARARGS, NULL},
{"_dist_irecv", (PyCFunction)THDPModule_irecv, METH_VARARGS, NULL},
{"_dist_send", (PyCFunction)THDPModule_send, METH_VARARGS, NULL},
{"_dist_recv_any_source", (PyCFunction)THDPModule_recvAnySource, METH_O, NULL},
{"_dist_recv", (PyCFunction)THDPModule_recv, METH_VARARGS, NULL},
{"_dist_all_reduce", (PyCFunction)THDPModule_allReduce, METH_VARARGS, NULL},
{"_dist_all_reduce_multigpu", (PyCFunction)THDPModule_allReduceMultiGPU, METH_VARARGS, NULL},
{"_dist_reduce", (PyCFunction)THDPModule_reduce, METH_VARARGS, NULL},
{"_dist_reduce_multigpu", (PyCFunction)THDPModule_reduceMultiGPU, METH_VARARGS, NULL},
{"_dist_broadcast", (PyCFunction)THDPModule_broadcast, METH_VARARGS, NULL},
{"_dist_broadcast_multigpu", (PyCFunction)THDPModule_broadcastMultiGPU, METH_VARARGS, NULL},
{"_dist_all_gather", (PyCFunction)THDPModule_allGather, METH_VARARGS, NULL},
{"_dist_all_gather_multigpu", (PyCFunction)THDPModule_allGatherMultiGPU, METH_VARARGS, NULL},
{"_dist_gather_send", (PyCFunction)THDPModule_gatherSend, METH_VARARGS, NULL},
{"_dist_gather_recv", (PyCFunction)THDPModule_gatherRecv, METH_VARARGS, NULL},
{"_dist_scatter_send", (PyCFunction)THDPModule_scatterSend, METH_VARARGS, NULL},
{"_dist_scatter_recv", (PyCFunction)THDPModule_scatterRecv, METH_VARARGS, NULL},
{"_dist_barrier", (PyCFunction)THDPModule_barrier, METH_O, NULL},
{"_dist_new_group", (PyCFunction)THDPModule_newGroup, METH_VARARGS, NULL},
{"_dist_request_is_completed", (PyCFunction)THDPModule_requestIsCompleted, METH_O, NULL},
{"_dist_request_wait", (PyCFunction)THDPModule_requestWait, METH_O, NULL},
{NULL}
{"_dist_get_rank", (PyCFunction)THDPModule_getRank, METH_NOARGS, nullptr},
{"_dist_get_num_processes", (PyCFunction)THDPModule_getNumProcesses, METH_NOARGS, nullptr},
{"_dist_isend", (PyCFunction)THDPModule_isend, METH_VARARGS, nullptr},
{"_dist_irecv", (PyCFunction)THDPModule_irecv, METH_VARARGS, nullptr},
{"_dist_send", (PyCFunction)THDPModule_send, METH_VARARGS, nullptr},
{"_dist_recv_any_source", (PyCFunction)THDPModule_recvAnySource, METH_O, nullptr},
{"_dist_recv", (PyCFunction)THDPModule_recv, METH_VARARGS, nullptr},
{"_dist_all_reduce", (PyCFunction)THDPModule_allReduce, METH_VARARGS, nullptr},
{"_dist_all_reduce_multigpu", (PyCFunction)THDPModule_allReduceMultiGPU, METH_VARARGS, nullptr},
{"_dist_reduce", (PyCFunction)THDPModule_reduce, METH_VARARGS, nullptr},
{"_dist_reduce_multigpu", (PyCFunction)THDPModule_reduceMultiGPU, METH_VARARGS, nullptr},
{"_dist_broadcast", (PyCFunction)THDPModule_broadcast, METH_VARARGS, nullptr},
{"_dist_broadcast_multigpu", (PyCFunction)THDPModule_broadcastMultiGPU, METH_VARARGS, nullptr},
{"_dist_all_gather", (PyCFunction)THDPModule_allGather, METH_VARARGS, nullptr},
{"_dist_all_gather_multigpu", (PyCFunction)THDPModule_allGatherMultiGPU, METH_VARARGS, nullptr},
{"_dist_gather_send", (PyCFunction)THDPModule_gatherSend, METH_VARARGS, nullptr},
{"_dist_gather_recv", (PyCFunction)THDPModule_gatherRecv, METH_VARARGS, nullptr},
{"_dist_scatter_send", (PyCFunction)THDPModule_scatterSend, METH_VARARGS, nullptr},
{"_dist_scatter_recv", (PyCFunction)THDPModule_scatterRecv, METH_VARARGS, nullptr},
{"_dist_barrier", (PyCFunction)THDPModule_barrier, METH_O, nullptr},
{"_dist_new_group", (PyCFunction)THDPModule_newGroup, METH_VARARGS, nullptr},
{"_dist_request_is_completed", (PyCFunction)THDPModule_requestIsCompleted, METH_O, nullptr},
{"_dist_request_wait", (PyCFunction)THDPModule_requestWait, METH_O, nullptr},
{nullptr}
};
PyMethodDef* THDPModule_methods() {

View File

@ -10,7 +10,7 @@ void THDPInsertCopyFunctionFromWorker(
auto wrapper = [copyFunc](PyObject* dst_, PyObject* src_) {
TensorSrc* src = THPTypeInfo<TensorSrc>::cdata(src_);
PyThreadState *_save = NULL;
PyThreadState *_save = nullptr;
try {
Py_UNBLOCK_THREADS;
copyFunc(LIBRARY_STATE THDPModule_makeDescriptor(dst_), src);
@ -36,7 +36,7 @@ void THDPInsertCopyFunctionFromMaster(
auto wrapper = [copyFunc](PyObject* dst_, PyObject* src_) {
TensorDst* dst = THPTypeInfo<TensorDst>::cdata(dst_);
PyThreadState *_save = NULL;
PyThreadState *_save = nullptr;
try {
Py_UNBLOCK_THREADS;
copyFunc(LIBRARY_STATE dst, THDPModule_makeDescriptor(src_));

View File

@ -2,7 +2,7 @@
#define TH_GENERIC_FILE "generic/Storage.cpp"
#else
PyObject *THPStorageClass = NULL;
PyObject *THPStorageClass = nullptr;
PyObject * THPStorage_(New)(THWStorage *ptr)
{
@ -27,7 +27,7 @@ static THWStorage* THPStorage_(newWithAllocator)(int64_t size, at::Allocator* al
{
#if defined(THC_GENERIC_FILE) || defined(THD_GENERIC_FILE)
THPUtils_setError(THPStorageStr " does not support custom allocators");
return NULL;
return nullptr;
#else
return THWStorage_(newWithAllocator)(LIBRARY_STATE size, allocator);
#endif
@ -40,10 +40,10 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec
THPStoragePtr self((THPStorage *)type->tp_alloc(type, 0));
THPUtils_assert(self, "failed to allocate a " THPStorageStr " object");
THAllocator* allocator = NULL;
THAllocator* allocator = nullptr;
// Internally we allow constructing with a keywoard only argument cdata
if (kwargs != NULL) {
if (kwargs != nullptr) {
PyObject *allocator_ptr = PyDict_GetItemString(kwargs, "allocator");
if (allocator_ptr) {
THPUtils_assert(THPUtils_checkLong(allocator_ptr), "invalid allocator");
@ -89,7 +89,7 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec
// torch.Storage(view_source, [offset, [size]])
if (num_args < 4 && THPStorage_(Check)(first_arg)) {
THPUtils_setError("storage views not supported");
return NULL;
return nullptr;
}
// torch.Storage(sequence)
@ -119,7 +119,7 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec
THPUtils_typename(first_arg),
THPUtils_typename(item.get()),
THPUtils_typeTraits<real>::python_type_str);
return NULL;
return nullptr;
}
return (PyObject*)self.release();
#endif
@ -132,7 +132,7 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec
"(" THPStorageStr " view_source)",
"(" THPStorageStr " view_source, int offset)",
"(" THPStorageStr " view_source, int offset, int size)");
return NULL;
return nullptr;
END_HANDLE_TH_ERRORS
}
@ -154,18 +154,18 @@ static PyObject * THPStorage_(get)(THPStorage *self, PyObject *index)
if (nindex < 0 || nindex >= self->cdata->numel()) {
PyErr_Format(PyExc_IndexError, "index %" PRId64 " out of range for storage of "
"size %" PRId64, (int64_t) nindex, (int64_t) self->cdata->numel());
return NULL;
return nullptr;
}
real value = THWStorage_(get)(LIBRARY_STATE self->cdata, nindex);
return THPUtils_(newReal)(value);
/* Slice index */
} else if (PySlice_Check(index)) {
THPUtils_setError("storages don't support slicing");
return NULL;
return nullptr;
}
PyErr_Format(PyExc_TypeError, "can't index a " THPStorageStr " with %s",
THPUtils_typename(index));
return NULL;
return nullptr;
END_HANDLE_TH_ERRORS
}
@ -214,7 +214,7 @@ static PyMappingMethods THPStorage_(mappingmethods) = {
// TODO: implement equality
PyTypeObject THPStorageType = {
PyVarObject_HEAD_INIT(NULL, 0)
PyVarObject_HEAD_INIT(nullptr, 0)
"torch._C." THPStorageBaseStr, /* tp_name */
sizeof(THPStorage), /* tp_basicsize */
0, /* tp_itemsize */
@ -234,7 +234,7 @@ PyTypeObject THPStorageType = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
NULL, /* tp_doc */
nullptr, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
@ -255,8 +255,8 @@ PyTypeObject THPStorageType = {
};
static struct PyMemberDef THPStorage_(members)[] = {
{(char*)"_cdata", T_ULONGLONG, offsetof(THPStorage, cdata), READONLY, NULL},
{NULL}
{(char*)"_cdata", T_ULONGLONG, offsetof(THPStorage, cdata), READONLY, nullptr},
{nullptr}
};
extern THPCopyList THWStorage_(copy_functions);

View File

@ -89,11 +89,11 @@ static PyObject * THPStorage_(fill_)(THPStorage *self, PyObject *number_arg)
static PyObject * THPStorage_(fromBuffer)(PyObject *_unused, PyObject *args, PyObject *keywds)
{
HANDLE_TH_ERRORS
PyObject *obj = NULL;
const char* byte_order_str = NULL;
PyObject *obj = nullptr;
const char* byte_order_str = nullptr;
Py_ssize_t count = -1, offset = 0;
Py_buffer buffer;
static char *kwlist[] = {"buffer", "byte_order", "count", "offset", NULL};
static char *kwlist[] = {"buffer", "byte_order", "count", "offset", nullptr};
const char* argtypes;
#if defined(TH_REAL_IS_BYTE) || defined(TH_REAL_IS_CHAR)
argtypes = "O|snn";
@ -103,7 +103,7 @@ static PyObject * THPStorage_(fromBuffer)(PyObject *_unused, PyObject *args, PyO
if (!PyArg_ParseTupleAndKeywords(args, keywds, argtypes, kwlist,
&obj, &byte_order_str, &count, &offset)) {
return NULL;
return nullptr;
}
#if !(defined(TH_REAL_IS_BYTE) || defined(TH_REAL_IS_CHAR))
@ -118,19 +118,19 @@ static PyObject * THPStorage_(fromBuffer)(PyObject *_unused, PyObject *args, PyO
PyErr_Format(PyExc_ValueError,
"invalid byte_order '%s' (expected 'big', 'little', or 'native')",
byte_order_str);
return NULL;
return nullptr;
}
#endif
if (PyObject_GetBuffer(obj, &buffer, PyBUF_SIMPLE) < 0)
return NULL;
return nullptr;
if (offset < 0 || offset > buffer.len) {
PyErr_Format(PyExc_ValueError,
"offset must be non-negative and no greater than buffer length (%" PRId64 "), "
"but got %" PRId64, (int64_t)offset, (int64_t)buffer.len);
PyBuffer_Release(&buffer);
return NULL;
return nullptr;
}
if (count < 0) {
@ -138,7 +138,7 @@ static PyObject * THPStorage_(fromBuffer)(PyObject *_unused, PyObject *args, PyO
PyErr_Format(PyExc_ValueError, "buffer size (%" PRId64 ") must be a multiple "
"of element size (%" PRId64 ")", (int64_t)buffer.len, (int64_t)sizeof(real));
PyBuffer_Release(&buffer);
return NULL;
return nullptr;
}
count = (buffer.len - offset) / sizeof(real);
}
@ -148,7 +148,7 @@ static PyObject * THPStorage_(fromBuffer)(PyObject *_unused, PyObject *args, PyO
"%" PRId64 ", but specified a size of %" PRId64, (int64_t)(buffer.len - offset),
(int64_t)offset, (int64_t)count);
PyBuffer_Release(&buffer);
return NULL;
return nullptr;
}
uint8_t* src = (uint8_t*) buffer.buf;
@ -185,10 +185,10 @@ static PyObject * THPStorage_(fromFile)(PyObject *_unused, PyObject *args, PyObj
const char *filename;
Py_ssize_t size = 0;
int shared = 0;
static char *kwlist[] = {"filename", "shared", "size", NULL};
static char *kwlist[] = {"filename", "shared", "size", nullptr};
if (!PyArg_ParseTupleAndKeywords(args, keywds, "s|in", kwlist,
&filename, &shared, &size)) {
return NULL;
return nullptr;
}
if (shared)
shared = TH_ALLOCATOR_MAPPED_SHARED;
@ -293,28 +293,28 @@ PyObject * THPStorage_(_setCdata)(THPStorage *self, PyObject *new_cdata)
}
static PyMethodDef THPStorage_(methods)[] = {
{"copy_", (PyCFunction)THPStorage_(copy_), METH_VARARGS | METH_KEYWORDS, NULL},
{"element_size", (PyCFunction)THPStorage_(elementSize), METH_NOARGS, NULL},
{"fill_", (PyCFunction)THPStorage_(fill_), METH_O, NULL},
{"new", (PyCFunction)THPStorage_(new), METH_NOARGS, NULL},
{"resize_", (PyCFunction)THPStorage_(resize_), METH_O, NULL},
{"size", (PyCFunction)THPStorage_(size), METH_NOARGS, NULL},
{"copy_", (PyCFunction)THPStorage_(copy_), METH_VARARGS | METH_KEYWORDS, nullptr},
{"element_size", (PyCFunction)THPStorage_(elementSize), METH_NOARGS, nullptr},
{"fill_", (PyCFunction)THPStorage_(fill_), METH_O, nullptr},
{"new", (PyCFunction)THPStorage_(new), METH_NOARGS, nullptr},
{"resize_", (PyCFunction)THPStorage_(resize_), METH_O, nullptr},
{"size", (PyCFunction)THPStorage_(size), METH_NOARGS, nullptr},
#ifndef THD_GENERIC_FILE
{"data_ptr", (PyCFunction)THPStorage_(dataPtr), METH_NOARGS, NULL},
{"is_pinned", (PyCFunction)THPStorage_(isPinned), METH_NOARGS, NULL},
{"_write_file", (PyCFunction)THPStorage_(writeFile), METH_VARARGS, NULL},
{"_new_with_file", (PyCFunction)THPStorage_(newWithFile), METH_O | METH_STATIC, NULL},
{"_set_from_file", (PyCFunction)THPStorage_(setFromFile), METH_VARARGS, NULL},
{"data_ptr", (PyCFunction)THPStorage_(dataPtr), METH_NOARGS, nullptr},
{"is_pinned", (PyCFunction)THPStorage_(isPinned), METH_NOARGS, nullptr},
{"_write_file", (PyCFunction)THPStorage_(writeFile), METH_VARARGS, nullptr},
{"_new_with_file", (PyCFunction)THPStorage_(newWithFile), METH_O | METH_STATIC, nullptr},
{"_set_from_file", (PyCFunction)THPStorage_(setFromFile), METH_VARARGS, nullptr},
#endif // !defined(THD_GENERIC_FILE)
#if !defined(THC_GENERIC_FILE) && !defined(THD_GENERIC_FILE)
{"from_buffer", (PyCFunction)THPStorage_(fromBuffer), METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"from_buffer", (PyCFunction)THPStorage_(fromBuffer), METH_VARARGS | METH_KEYWORDS | METH_STATIC, nullptr},
#endif
{"from_file", (PyCFunction)THPStorage_(fromFile), METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"from_file", (PyCFunction)THPStorage_(fromFile), METH_VARARGS | METH_KEYWORDS | METH_STATIC, nullptr},
#ifdef THC_GENERIC_FILE
{"get_device", (PyCFunction)THPStorage_(getDevice), METH_NOARGS, NULL},
{"get_device", (PyCFunction)THPStorage_(getDevice), METH_NOARGS, nullptr},
#endif
{"_set_cdata", (PyCFunction)THPStorage_(_setCdata), METH_O, NULL},
{"_set_cdata", (PyCFunction)THPStorage_(_setCdata), METH_O, nullptr},
#ifndef THD_GENERIC_FILE
#endif
{NULL}
{nullptr}
};

View File

@ -62,7 +62,7 @@ static PyObject * THPStorage_(pyNewFilenameStorage)(PyObject *_unused, PyObject
HANDLE_TH_ERRORS
long long size;
if (!PyArg_ParseTuple(args, "L", &size)) {
return NULL;
return nullptr;
}
return THPStorage_(New)(THPStorage_(newFilenameStorage)(size));
END_HANDLE_TH_ERRORS
@ -87,14 +87,14 @@ static PyObject * THPStorage_(shareFilename)(THPStorage *self)
}
THPObjectPtr manager_handle(PyBytes_FromString(ctx->manager_handle()));
if (!manager_handle) return NULL;
if (!manager_handle) return nullptr;
THPObjectPtr storage_handle(PyBytes_FromString(ctx->filename()));
if (!storage_handle) return NULL;
if (!storage_handle) return nullptr;
THPObjectPtr size(PyLong_FromLong(storage->numel()));
if (!size) return NULL;
if (!size) return nullptr;
THPObjectPtr tuple(PyTuple_New(3));
if (!tuple) return NULL;
if (!tuple) return nullptr;
PyTuple_SET_ITEM(tuple.get(), 0, manager_handle.release());
PyTuple_SET_ITEM(tuple.get(), 1, storage_handle.release());
PyTuple_SET_ITEM(tuple.get(), 2, size.release());
@ -110,9 +110,9 @@ static PyObject * THPStorage_(newSharedFilename)(PyObject *_unused, PyObject *ar
PyObject *_object_handle = PyTuple_GET_ITEM(args, 1);
PyObject *_size = PyTuple_GET_ITEM(args, 2);
if (!PyBytes_Check(_manager_handle) || !PyBytes_Check(_object_handle) || !THPUtils_checkLong(_size)) {
THPUtils_invalidArguments(args, NULL, "_new_shared in file system mode", 1,
THPUtils_invalidArguments(args, nullptr, "_new_shared in file system mode", 1,
"a handle (string/bytes) and storage size (int)");
return NULL;
return nullptr;
}
const char *manager_handle = PyBytes_AS_STRING(_manager_handle);
const char *object_handle = PyBytes_AS_STRING(_object_handle);
@ -143,7 +143,7 @@ static PyObject * THPStorage_(pyNewFdStorage)(PyObject *_unused, PyObject *args)
HANDLE_TH_ERRORS
long long size;
if (!PyArg_ParseTuple(args, "L", &size)) {
return NULL;
return nullptr;
}
return THPStorage_(New)(THPStorage_(newFdStorage)(size));
END_HANDLE_TH_ERRORS
@ -166,12 +166,12 @@ static PyObject * THPStorage_(shareFd)(THPStorage *self)
}
THPObjectPtr storage_handle(PyLong_FromLong(ctx->fd()));
if (!storage_handle) return NULL;
if (!storage_handle) return nullptr;
THPObjectPtr size(PyLong_FromLong(storage->numel()));
if (!size) return NULL;
if (!size) return nullptr;
THPObjectPtr tuple(PyTuple_New(2));
if (!tuple) return NULL;
if (!tuple) return nullptr;
PyTuple_SET_ITEM(tuple.get(), 0, storage_handle.release());
PyTuple_SET_ITEM(tuple.get(), 1, size.release());
return tuple.release();
@ -185,16 +185,16 @@ static PyObject * THPStorage_(newSharedFd)(PyObject *_unused, PyObject *args)
PyObject *_tmp_fd = PyTuple_GET_ITEM(args, 0);
PyObject *_size = PyTuple_GET_ITEM(args, 1);
if (!THPUtils_checkLong(_tmp_fd) || !THPUtils_checkLong(_size)) {
THPUtils_invalidArguments(args, NULL, "_new_shared in file descriptor mode",
THPUtils_invalidArguments(args, nullptr, "_new_shared in file descriptor mode",
1, "a file descriptor (int) and storage size (int)");
return NULL;
return nullptr;
}
int fd;
int tmp_fd = (int) THPUtils_unpackLong(_tmp_fd);
int64_t size = THPUtils_unpackLong(_size);
if ((fd = dup(tmp_fd)) == -1) {
THPUtils_setError("could not duplicate a shared memory file descriptor");
return NULL;
return nullptr;
}
int flags = TH_ALLOCATOR_MAPPED_SHAREDMEM |
@ -235,7 +235,7 @@ static PyObject * THPStorage_(shareCuda)(THPStorage *self)
size = PyLong_FromSize_t(base_size / sizeof(real));
}
if (!tuple || !device || !_handle || !size || !_offset) {
return NULL;
return nullptr;
}
PyTuple_SET_ITEM(tuple.get(), 0, device.release());
PyTuple_SET_ITEM(tuple.get(), 1, _handle.release());
@ -254,9 +254,9 @@ static PyObject * THPStorage_(newSharedCuda)(PyObject *_unused, PyObject *args)
PyObject *_size = PyTuple_GET_ITEM(args, 2);
if (!(THPUtils_checkLong(_device) && THPUtils_checkLong(_size)
&& (_handle == Py_None || PyBytes_Check(_handle)))) {
THPUtils_invalidArguments(args, NULL, "_new_shared in CUDA mode", 1,
THPUtils_invalidArguments(args, nullptr, "_new_shared in CUDA mode", 1,
"(int device, bytes handle, int storage_size)");
return NULL;
return nullptr;
}
size_t storage_size = (size_t)THPUtils_unpackLong(_size);
@ -267,12 +267,12 @@ static PyObject * THPStorage_(newSharedCuda)(PyObject *_unused, PyObject *args)
char *buffer;
Py_ssize_t handle_size;
if (PyBytes_AsStringAndSize(_handle, &buffer, &handle_size) == -1) {
return NULL;
return nullptr;
}
THPUtils_assert(handle_size == CUDA_IPC_HANDLE_SIZE, "incorrect handle size");
cudaIpcMemHandle_t handle = *(cudaIpcMemHandle_t*)buffer;
void *devPtr = NULL;
void *devPtr = nullptr;
THCudaCheck(cudaIpcOpenMemHandle(&devPtr, handle, cudaIpcMemLazyEnablePeerAccess));
THWStoragePtr base(THWStorage_(newWithDataAndAllocator)(
@ -364,24 +364,24 @@ PyObject * THPStorage_(isShared)(THPStorage *self)
}
static PyMethodDef THPStorage_(sharingMethods)[] = {
{"_new_with_weak_ptr", (PyCFunction)THPStorage_(newWithWeakPtr), METH_O | METH_CLASS, NULL},
{"_new_with_weak_ptr", (PyCFunction)THPStorage_(newWithWeakPtr), METH_O | METH_CLASS, nullptr},
#ifdef THC_GENERIC_FILE
{"_share_cuda_", (PyCFunction)THPStorage_(shareCuda), METH_NOARGS, NULL},
{"_new_shared_cuda", (PyCFunction)THPStorage_(newSharedCuda), METH_VARARGS | METH_STATIC, NULL},
{"_share_cuda_", (PyCFunction)THPStorage_(shareCuda), METH_NOARGS, nullptr},
{"_new_shared_cuda", (PyCFunction)THPStorage_(newSharedCuda), METH_VARARGS | METH_STATIC, nullptr},
#else
{"_share_fd_", (PyCFunction)THPStorage_(shareFd), METH_NOARGS, NULL},
{"_new_shared_fd", (PyCFunction)THPStorage_(newSharedFd), METH_VARARGS | METH_STATIC, NULL},
{"_new_using_fd", (PyCFunction)THPStorage_(pyNewFdStorage), METH_VARARGS | METH_STATIC, NULL},
{"_share_filename_", (PyCFunction)THPStorage_(shareFilename), METH_NOARGS, NULL},
{"_new_shared_filename", (PyCFunction)THPStorage_(newSharedFilename), METH_VARARGS | METH_STATIC, NULL},
{"_new_using_filename", (PyCFunction)THPStorage_(pyNewFilenameStorage), METH_VARARGS | METH_STATIC, NULL},
{"_share_fd_", (PyCFunction)THPStorage_(shareFd), METH_NOARGS, nullptr},
{"_new_shared_fd", (PyCFunction)THPStorage_(newSharedFd), METH_VARARGS | METH_STATIC, nullptr},
{"_new_using_fd", (PyCFunction)THPStorage_(pyNewFdStorage), METH_VARARGS | METH_STATIC, nullptr},
{"_share_filename_", (PyCFunction)THPStorage_(shareFilename), METH_NOARGS, nullptr},
{"_new_shared_filename", (PyCFunction)THPStorage_(newSharedFilename), METH_VARARGS | METH_STATIC, nullptr},
{"_new_using_filename", (PyCFunction)THPStorage_(pyNewFilenameStorage), METH_VARARGS | METH_STATIC, nullptr},
#endif
{"_weak_ref", (PyCFunction)THPStorage_(weakRef), METH_NOARGS, NULL},
{"_free_weak_ref", (PyCFunction)THPStorage_(freeWeakRef), METH_O | METH_STATIC, NULL},
{"_expired", (PyCFunction)THPStorage_(expired), METH_O | METH_STATIC, NULL},
{"_shared_decref", (PyCFunction)THPStorage_(sharedDecref), METH_NOARGS, NULL},
{"_shared_incref", (PyCFunction)THPStorage_(sharedIncref), METH_NOARGS, NULL},
{"_get_shared_fd", (PyCFunction)THPStorage_(sharedFd), METH_NOARGS, NULL},
{"is_shared", (PyCFunction)THPStorage_(isShared), METH_NOARGS, NULL},
{NULL}
{"_weak_ref", (PyCFunction)THPStorage_(weakRef), METH_NOARGS, nullptr},
{"_free_weak_ref", (PyCFunction)THPStorage_(freeWeakRef), METH_O | METH_STATIC, nullptr},
{"_expired", (PyCFunction)THPStorage_(expired), METH_O | METH_STATIC, nullptr},
{"_shared_decref", (PyCFunction)THPStorage_(sharedDecref), METH_NOARGS, nullptr},
{"_shared_incref", (PyCFunction)THPStorage_(sharedIncref), METH_NOARGS, nullptr},
{"_get_shared_fd", (PyCFunction)THPStorage_(sharedFd), METH_NOARGS, nullptr},
{"is_shared", (PyCFunction)THPStorage_(isShared), METH_NOARGS, nullptr},
{nullptr}
};

View File

@ -903,7 +903,7 @@ struct CUDAFusedKernel : public FusedKernel {
std::tie(chunk_desc, concat_desc, has_random) = codegen::emitCompilationUnit(cu, name, agraph, true);
compilation_unit = cu.str();
nvrtcProgram program;
TORCH_NVRTC_CHECK(nvrtcCreateProgram(&program, compilation_unit.c_str(), NULL, 0, nullptr, nullptr));
TORCH_NVRTC_CHECK(nvrtcCreateProgram(&program, compilation_unit.c_str(), nullptr, 0, nullptr, nullptr));
std::string compute = "--gpu-architecture=compute_" + std::to_string(prop.major) + std::to_string(prop.minor);
std::vector<const char *> args = {"--std=c++11", compute.c_str(), "-default-device"};

View File

@ -139,7 +139,7 @@ namespace torch { namespace jit {
// we then declare constexpr Symbols to get everything the actual Symbol
// type we want. Symbols must be constexpr to be valid to be "case"ed on.
typedef uint32_t unique_t;
using unique_t = uint32_t;
static const std::string domain_prefix = "org.pytorch.";

View File

@ -56,7 +56,6 @@ struct Node;
struct Value;
TORCH_API std::ostream& operator<<(std::ostream & out, const Graph & g);
TORCH_API std::ostream& operator<<(std::ostream & out, const Type & t);
TORCH_API std::ostream& operator<<(std::ostream & out, const Node & n);
// A list of nodes, with inputs and outputs
@ -114,7 +113,7 @@ private:
public:
Scope() {
name_ = Symbol::scope("");
parent_ = NULL;
parent_ = nullptr;
}
Scope(Scope* parent, Symbol name) {
name_ = name;
@ -125,13 +124,13 @@ public:
return children_.back().get();
}
Scope* parent() {
if (parent_ == NULL) {
if (parent_ == nullptr) {
throw std::runtime_error("Cannot get parent from Scope with no parent");
}
return parent_;
}
bool isRoot() {
return parent_ == NULL;
return parent_ == nullptr;
}
Scope* getRoot() {
Scope* current = this;
@ -197,7 +196,7 @@ public:
return unique_;
}
bool hasUniqueName() const {
return unique_name_ != "";
return !unique_name_.empty();
}
TORCH_API Value* setUniqueName(const std::string & name);
std::string uniqueName() const {
@ -331,7 +330,7 @@ public:
scope_ = scope;
}
std::string scopeName() const {
if (scope_ == NULL) {
if (scope_ == nullptr) {
return "";
}
return scope_->namesFromRoot();
@ -771,10 +770,10 @@ struct Block {
return static_cast<const Node*>(output_)->inputs();
}
graph_node_list nodes() {
return graph_node_list(output_, kNextDirection);
return {output_, kNextDirection};
}
const_graph_node_list nodes() const {
return const_graph_node_list(output_, kNextDirection);
return {output_, kNextDirection};
}
Node * return_node() {
return output_;

View File

@ -25,10 +25,10 @@ std::string getPythonInterpreterStackTrace() {
std::stringstream stack_trace;
AutoGIL gil;
PyThreadState *tstate = PyThreadState_GET();
if (NULL != tstate && NULL != tstate->frame) {
if (nullptr != tstate && nullptr != tstate->frame) {
PyFrameObject *frame = tstate->frame;
while (NULL != frame) {
while (nullptr != frame) {
int line = PyCode_Addr2Line(frame->f_code, frame->f_lasti);
std::string filename = THPUtils_unpackString(frame->f_code->co_filename);
std::string funcname = THPUtils_unpackString(frame->f_code->co_name);

View File

@ -3,14 +3,14 @@
static PyObject* module;
static PyMethodDef TorchNvrtcMethods[] = {
{NULL, NULL, 0, NULL}
{nullptr, nullptr, 0, nullptr}
};
#if PY_MAJOR_VERSION != 2
static struct PyModuleDef torchnvrtcmodule = {
PyModuleDef_HEAD_INIT,
"torch._nvrtc",
NULL,
nullptr,
-1,
TorchNvrtcMethods
};
@ -26,7 +26,7 @@ PyMODINIT_FUNC PyInit__nvrtc(void)
#if PY_MAJOR_VERSION == 2
#define ASSERT_TRUE(cmd) if (!(cmd)) {PyErr_SetString(PyExc_ImportError, "initialization error in torch._nvrtc"); return;}
#else
#define ASSERT_TRUE(cmd) if (!(cmd)) return NULL
#define ASSERT_TRUE(cmd) if (!(cmd)) return nullptr
#endif
#if PY_MAJOR_VERSION == 2

View File

@ -119,8 +119,8 @@ PyObject *Tensor_is_sparse(PyTensorType *self) {
}
static struct PyMethodDef metaclass_methods[] = {
{"__instancecheck__", (PyCFunction)Tensor_instancecheck, METH_O, NULL},
{NULL}
{"__instancecheck__", (PyCFunction)Tensor_instancecheck, METH_O, nullptr},
{nullptr}
};
typedef PyObject *(*getter)(PyObject *, void *);
@ -151,7 +151,7 @@ static void py_initialize_metaclass(PyTypeObject& metaclass) {
static void py_initialize_tensor_type(PyTypeObject& type, const char* name, PyObject* tp_dict) {
// NOTE: we don't use the typical static declaration of PyTypeObject because
// we need to initialize as many types as there are VariableType instances.
// The typical PyVarObject_HEAD_INIT(NULL, 0) is described in the Python
// The typical PyVarObject_HEAD_INIT(nullptr, 0) is described in the Python
// documentation: it initializes the refcnt to 1 and the other object header
// fields to zero.
memset(&type, 0, sizeof(PyTypeObject));

View File

@ -137,7 +137,7 @@ void THPUtils_setError(const char *format, ...)
void THPUtils_addPyMethodDefs(std::vector<PyMethodDef>& vector, PyMethodDef* methods)
{
if (!vector.empty()) {
// remove NULL terminator
// remove nullptr terminator
vector.pop_back();
}
while (1) {

View File

@ -116,7 +116,7 @@
#define THPByteUtils_unpackAccreal(object) (int64_t)THPUtils_unpackReal_INT(object)
#define THPByteUtils_newAccreal(value) THPUtils_newReal_INT(value)
#define THPUtils_assert(cond, ...) THPUtils_assertRet(NULL, cond, __VA_ARGS__)
#define THPUtils_assert(cond, ...) THPUtils_assertRet(nullptr, cond, __VA_ARGS__)
#define THPUtils_assertRet(value, cond, ...) \
if (THP_EXPECT(!(cond), 0)) { THPUtils_setError(__VA_ARGS__); return value; }
THP_API void THPUtils_setError(const char *format, ...);

View File

@ -141,7 +141,7 @@ ScalarType infer_scalar_type(PyObject *obj) {
return numpy_dtype_to_aten(PyArray_TYPE((PyArrayObject*)obj));
}
if (PyArray_CheckScalar(obj)) {
return numpy_dtype_to_aten(PyArray_TYPE((PyArrayObject*)(PyArray_FromScalar(obj, NULL))));
return numpy_dtype_to_aten(PyArray_TYPE((PyArrayObject*)(PyArray_FromScalar(obj, nullptr))));
}
#endif
if (PySequence_Check(obj)) {

View File

@ -68,7 +68,7 @@ PyObject* tensor_to_numpy(const at::Tensor& tensor) {
0,
NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE,
nullptr));
if (!array) return NULL;
if (!array) return nullptr;
// TODO: This attempts to keep the underlying memory alive by setting the base
// object of the ndarray to the tensor and disabling resizes on the storage.
@ -77,7 +77,7 @@ PyObject* tensor_to_numpy(const at::Tensor& tensor) {
PyObject* py_tensor = THPVariable_Wrap(make_variable(tensor, false));
if (!py_tensor) throw python_error();
if (PyArray_SetBaseObject((PyArrayObject*)array.get(), py_tensor) == -1) {
return NULL;
return nullptr;
}
// Use the private storage API
tensor.storage().unsafeGetStorageImpl()->set_resizable(false);