Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/68693

Generation of python bindings for native functions is split over 8 different files: one for each namespace, with the torch namespace split into 3 shards, and methods in their own file as well. This change ensures that editing any single (non-method) operator only causes one of these files to be rebuilt.

Test Plan: Imported from OSS

Reviewed By: jbschlosser

Differential Revision: D32596270

Pulled By: albanD

fbshipit-source-id: 0570ec69e7476b8f1bc21138ba18fe8f95ebbe3f
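For context, here is a minimal sketch of how the per-shard gather functions emitted from this template could be combined into a single method table at module initialization. In PyTorch the actual aggregation is done by hand-written registration code, so the aggregator name, the shard suffixes, and the sentinel handling below are illustrative assumptions based on the commit summary, not the real registration path:

// Hypothetical aggregator (illustrative only): each generated shard of this
// template defines gatherTorchFunctions${shard_id} for one shard id.
#include <Python.h>
#include <vector>

namespace torch { namespace autograd {

// Assumed per-shard entry points, generated from this template with
// ${shard_id} = _0, _1, _2 (names are assumptions for this sketch).
void gatherTorchFunctions_0(std::vector<PyMethodDef>& torch_functions);
void gatherTorchFunctions_1(std::vector<PyMethodDef>& torch_functions);
void gatherTorchFunctions_2(std::vector<PyMethodDef>& torch_functions);

// Concatenates all shards into one table and appends the null entry that
// CPython uses to detect the end of a PyMethodDef array.
std::vector<PyMethodDef> gatherAllTorchFunctions() {
  std::vector<PyMethodDef> table;
  gatherTorchFunctions_0(table);
  gatherTorchFunctions_1(table);
  gatherTorchFunctions_2(table);
  table.push_back({nullptr, nullptr, 0, nullptr});  // sentinel entry
  return table;
}

}} // namespace torch::autograd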
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}

// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on a class
// torch._C._VariableFunctions which is also aliased as Variable._torch
// and also copied into 'torch' module.

#include <Python.h>

// Undefine the copysign macro so that at::copysign works as intended with MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER
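// Without the #undef above, a call such as at::copysign(x, y) would be
// macro-expanded by pyconfig.h's "#define copysign _copysign" before C++
// overload resolution runs, bypassing the ATen overload entirely.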
#include "torch/csrc/autograd/python_torch_functions.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/Dtype.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pybind.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_layouts.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/jit/frontend/tracer.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/cuda_lazy_init.h"
#include "torch/csrc/autograd/python_return_types.h"

#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
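// Under AT_PER_OPERATOR_HEADERS, the code generator replaces $ops_headers
// with one narrow header per operator bound in this shard, e.g. (operator
// names here are illustrative; the real list is generated):
//   #include <ATen/ops/arange.h>
//   #include <ATen/ops/randn.h>
// Depending only on per-operator headers is what lets an edit to a single
// operator rebuild just the shard(s) that reference it.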
#include <functional>
#include <initializer_list>
#include <stdexcept>
#include <utility>
using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using at::ArrayRef;
using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;

// NOTE: See [Sharded File] comment in VariableType

namespace torch { namespace autograd {

// generated forward declarations start here

${py_forwards}

static PyMethodDef torch_functions_shard[] = {
  ${py_method_defs}
};
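// Note: this shard's table deliberately omits the {nullptr} sentinel that a
// standalone PyMethodDef array would end with; shards are concatenated by
// the gather function below, and the combined table is null-terminated by
// the aggregating code (see the sketch near the top of this page).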
void gatherTorchFunctions${shard_id}(std::vector<PyMethodDef> &torch_functions) {
  // Compute this shard's entry count at compile time from the array size.
  constexpr size_t num_functions = sizeof(torch_functions_shard) / sizeof(torch_functions_shard[0]);
  torch_functions.insert(
    torch_functions.end(),
    torch_functions_shard,
    torch_functions_shard + num_functions);
}
// generated methods start here

${py_methods}

}} // namespace torch::autograd
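Below is a sketch of how the remaining placeholders are expected to be filled in. The code generator lives at tools/autograd/gen_python_functions.py; the operator name and generated bodies shown here are illustrative, not actual generator output:

// ${py_forwards} expands to forward declarations like:
//   static PyObject* THPVariable_arange(PyObject* self_, PyObject* args, PyObject* kwargs);
// ${py_method_defs} expands to PyMethodDef entries like:
//   {"arange", castPyCFunctionWithKeywords(THPVariable_arange),
//    METH_VARARGS | METH_KEYWORDS | METH_STATIC, nullptr},
// ${py_methods} expands to the definitions, roughly:
//   static PyObject* THPVariable_arange(PyObject* self_, PyObject* args, PyObject* kwargs) {
//     HANDLE_TH_ERRORS
//     // parse the Python arguments, then dispatch to the ATen operator
//     ...
//     END_HANDLE_TH_ERRORS
//   }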