Summary: https://github.com/facebookresearch/functorch/issues/87

TODO:
* [x] Add comments
* [x] Add test
* [x] Fix XLA

<details>
<summary>Generated python_return_types.cpp</summary>

```cpp
#include <Python.h>

#include <vector>
#include <map>
#include <string>

#include "torch/csrc/autograd/python_return_types.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/Exceptions.h"

namespace {

PyTypeObject* get__det_lu_based_helper_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"det", ""}, {"lu", ""}, {"pivs", ""}, {nullptr} };
  static PyTypeObject _det_lu_based_helperNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types._det_lu_based_helper", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&_det_lu_based_helperNamedTuple, &desc); _det_lu_based_helperNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &_det_lu_based_helperNamedTuple;
}
PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"output", ""}, {"mask", ""}, {nullptr} };
  static PyTypeObject _fake_quantize_per_tensor_affine_cachemask_tensor_qparamsNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&_fake_quantize_per_tensor_affine_cachemask_tensor_qparamsNamedTuple, &desc); _fake_quantize_per_tensor_affine_cachemask_tensor_qparamsNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &_fake_quantize_per_tensor_affine_cachemask_tensor_qparamsNamedTuple;
}
PyTypeObject* get__fused_moving_avg_obs_fq_helper_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"output", ""}, {"mask", ""}, {nullptr} };
  static PyTypeObject _fused_moving_avg_obs_fq_helperNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types._fused_moving_avg_obs_fq_helper", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&_fused_moving_avg_obs_fq_helperNamedTuple, &desc); _fused_moving_avg_obs_fq_helperNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &_fused_moving_avg_obs_fq_helperNamedTuple;
}
PyTypeObject* get__lu_with_info_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"LU", ""}, {"pivots", ""}, {"info", ""}, {nullptr} };
  static PyTypeObject _lu_with_infoNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types._lu_with_info", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&_lu_with_infoNamedTuple, &desc); _lu_with_infoNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &_lu_with_infoNamedTuple;
}
PyTypeObject* get__unpack_dual_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"primal", ""}, {"tangent", ""}, {nullptr} };
  static PyTypeObject _unpack_dualNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types._unpack_dual", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&_unpack_dualNamedTuple, &desc); _unpack_dualNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &_unpack_dualNamedTuple;
}
PyTypeObject* get_aminmax_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"min", ""}, {"max", ""}, {nullptr} };
  static PyTypeObject aminmaxNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.aminmax", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&aminmaxNamedTuple, &desc); aminmaxNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &aminmaxNamedTuple;
}
PyTypeObject* get_aminmax_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"min", ""}, {"max", ""}, {nullptr} };
  static PyTypeObject aminmax_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.aminmax_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&aminmax_outNamedTuple1, &desc); aminmax_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &aminmax_outNamedTuple1;
}
PyTypeObject* get_cummax_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject cummaxNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.cummax", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&cummaxNamedTuple, &desc); cummaxNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &cummaxNamedTuple;
}
PyTypeObject* get_cummax_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject cummax_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.cummax_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&cummax_outNamedTuple1, &desc); cummax_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &cummax_outNamedTuple1;
}
PyTypeObject* get_cummin_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject cumminNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.cummin", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&cumminNamedTuple, &desc); cumminNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &cumminNamedTuple;
}
PyTypeObject* get_cummin_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject cummin_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.cummin_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&cummin_outNamedTuple1, &desc); cummin_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &cummin_outNamedTuple1;
}
PyTypeObject* get_eig_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject eig_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.eig_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&eig_outNamedTuple, &desc); eig_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &eig_outNamedTuple;
}
PyTypeObject* get_eig_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject eigNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.eig", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&eigNamedTuple1, &desc); eigNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &eigNamedTuple1;
}
PyTypeObject* get_frexp_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"mantissa", ""}, {"exponent", ""}, {nullptr} };
  static PyTypeObject frexpNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.frexp", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&frexpNamedTuple, &desc); frexpNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &frexpNamedTuple;
}
PyTypeObject* get_frexp_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"mantissa", ""}, {"exponent", ""}, {nullptr} };
  static PyTypeObject frexp_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.frexp_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&frexp_outNamedTuple1, &desc); frexp_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &frexp_outNamedTuple1;
}
PyTypeObject* get_geqrf_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"a", ""}, {"tau", ""}, {nullptr} };
  static PyTypeObject geqrf_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.geqrf_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&geqrf_outNamedTuple, &desc); geqrf_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &geqrf_outNamedTuple;
}
PyTypeObject* get_geqrf_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"a", ""}, {"tau", ""}, {nullptr} };
  static PyTypeObject geqrfNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.geqrf", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&geqrfNamedTuple1, &desc); geqrfNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &geqrfNamedTuple1;
}
PyTypeObject* get_histogram_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"hist", ""}, {"bin_edges", ""}, {nullptr} };
  static PyTypeObject histogram_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.histogram_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&histogram_outNamedTuple, &desc); histogram_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &histogram_outNamedTuple;
}
PyTypeObject* get_histogram_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"hist", ""}, {"bin_edges", ""}, {nullptr} };
  static PyTypeObject histogramNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.histogram", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&histogramNamedTuple1, &desc); histogramNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &histogramNamedTuple1;
}
PyTypeObject* get_kthvalue_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject kthvalueNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.kthvalue", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&kthvalueNamedTuple, &desc); kthvalueNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &kthvalueNamedTuple;
}
PyTypeObject* get_kthvalue_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject kthvalue_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.kthvalue_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&kthvalue_outNamedTuple1, &desc); kthvalue_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &kthvalue_outNamedTuple1;
}
PyTypeObject* get_linalg_cholesky_ex_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"L", ""}, {"info", ""}, {nullptr} };
  static PyTypeObject linalg_cholesky_exNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_cholesky_ex", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_cholesky_exNamedTuple, &desc); linalg_cholesky_exNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_cholesky_exNamedTuple;
}
PyTypeObject* get_linalg_cholesky_ex_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"L", ""}, {"info", ""}, {nullptr} };
  static PyTypeObject linalg_cholesky_ex_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_cholesky_ex_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_cholesky_ex_outNamedTuple1, &desc); linalg_cholesky_ex_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_cholesky_ex_outNamedTuple1;
}
PyTypeObject* get_linalg_eig_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject linalg_eigNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_eig", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_eigNamedTuple, &desc); linalg_eigNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_eigNamedTuple;
}
PyTypeObject* get_linalg_eig_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject linalg_eig_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_eig_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_eig_outNamedTuple1, &desc); linalg_eig_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_eig_outNamedTuple1;
}
PyTypeObject* get_linalg_eigh_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject linalg_eighNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_eigh", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_eighNamedTuple, &desc); linalg_eighNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_eighNamedTuple;
}
PyTypeObject* get_linalg_eigh_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject linalg_eigh_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_eigh_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_eigh_outNamedTuple1, &desc); linalg_eigh_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_eigh_outNamedTuple1;
}
PyTypeObject* get_linalg_inv_ex_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"inverse", ""}, {"info", ""}, {nullptr} };
  static PyTypeObject linalg_inv_exNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_inv_ex", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_inv_exNamedTuple, &desc); linalg_inv_exNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_inv_exNamedTuple;
}
PyTypeObject* get_linalg_inv_ex_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"inverse", ""}, {"info", ""}, {nullptr} };
  static PyTypeObject linalg_inv_ex_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_inv_ex_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_inv_ex_outNamedTuple1, &desc); linalg_inv_ex_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_inv_ex_outNamedTuple1;
}
PyTypeObject* get_linalg_lstsq_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"residuals", ""}, {"rank", ""}, {"singular_values", ""}, {nullptr} };
  static PyTypeObject linalg_lstsqNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_lstsq", nullptr, NamedTuple_fields, 4 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_lstsqNamedTuple, &desc); linalg_lstsqNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_lstsqNamedTuple;
}
PyTypeObject* get_linalg_lstsq_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"residuals", ""}, {"rank", ""}, {"singular_values", ""}, {nullptr} };
  static PyTypeObject linalg_lstsq_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_lstsq_out", nullptr, NamedTuple_fields, 4 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_lstsq_outNamedTuple1, &desc); linalg_lstsq_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_lstsq_outNamedTuple1;
}
PyTypeObject* get_linalg_qr_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"Q", ""}, {"R", ""}, {nullptr} };
  static PyTypeObject linalg_qrNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_qr", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_qrNamedTuple, &desc); linalg_qrNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_qrNamedTuple;
}
PyTypeObject* get_linalg_qr_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"Q", ""}, {"R", ""}, {nullptr} };
  static PyTypeObject linalg_qr_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_qr_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_qr_outNamedTuple1, &desc); linalg_qr_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_qr_outNamedTuple1;
}
PyTypeObject* get_linalg_slogdet_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"sign", ""}, {"logabsdet", ""}, {nullptr} };
  static PyTypeObject linalg_slogdetNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_slogdet", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_slogdetNamedTuple, &desc); linalg_slogdetNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_slogdetNamedTuple;
}
PyTypeObject* get_linalg_slogdet_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"sign", ""}, {"logabsdet", ""}, {nullptr} };
  static PyTypeObject linalg_slogdet_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_slogdet_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_slogdet_outNamedTuple1, &desc); linalg_slogdet_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_slogdet_outNamedTuple1;
}
PyTypeObject* get_linalg_svd_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"U", ""}, {"S", ""}, {"Vh", ""}, {nullptr} };
  static PyTypeObject linalg_svd_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_svd_out", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_svd_outNamedTuple, &desc); linalg_svd_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_svd_outNamedTuple;
}
PyTypeObject* get_linalg_svd_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"U", ""}, {"S", ""}, {"Vh", ""}, {nullptr} };
  static PyTypeObject linalg_svdNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.linalg_svd", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&linalg_svdNamedTuple1, &desc); linalg_svdNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &linalg_svdNamedTuple1;
}
PyTypeObject* get_lstsq_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"QR", ""}, {nullptr} };
  static PyTypeObject lstsq_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.lstsq_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&lstsq_outNamedTuple, &desc); lstsq_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &lstsq_outNamedTuple;
}
PyTypeObject* get_lstsq_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"QR", ""}, {nullptr} };
  static PyTypeObject lstsqNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.lstsq", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&lstsqNamedTuple1, &desc); lstsqNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &lstsqNamedTuple1;
}
PyTypeObject* get_lu_unpack_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"P", ""}, {"L", ""}, {"U", ""}, {nullptr} };
  static PyTypeObject lu_unpackNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.lu_unpack", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&lu_unpackNamedTuple, &desc); lu_unpackNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &lu_unpackNamedTuple;
}
PyTypeObject* get_lu_unpack_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"P", ""}, {"L", ""}, {"U", ""}, {nullptr} };
  static PyTypeObject lu_unpack_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.lu_unpack_out", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&lu_unpack_outNamedTuple1, &desc); lu_unpack_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &lu_unpack_outNamedTuple1;
}
PyTypeObject* get_max_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject maxNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.max", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&maxNamedTuple, &desc); maxNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &maxNamedTuple;
}
PyTypeObject* get_max_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject max_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.max_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&max_outNamedTuple1, &desc); max_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &max_outNamedTuple1;
}
PyTypeObject* get_median_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject medianNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.median", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&medianNamedTuple, &desc); medianNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &medianNamedTuple;
}
PyTypeObject* get_median_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject median_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.median_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&median_outNamedTuple1, &desc); median_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &median_outNamedTuple1;
}
PyTypeObject* get_min_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject minNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.min", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&minNamedTuple, &desc); minNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &minNamedTuple;
}
PyTypeObject* get_min_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject min_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.min_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&min_outNamedTuple1, &desc); min_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &min_outNamedTuple1;
}
PyTypeObject* get_mode_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject modeNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.mode", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&modeNamedTuple, &desc); modeNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &modeNamedTuple;
}
PyTypeObject* get_mode_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject mode_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.mode_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&mode_outNamedTuple1, &desc); mode_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &mode_outNamedTuple1;
}
PyTypeObject* get_nanmedian_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject nanmedianNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.nanmedian", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&nanmedianNamedTuple, &desc); nanmedianNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &nanmedianNamedTuple;
}
PyTypeObject* get_nanmedian_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject nanmedian_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.nanmedian_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&nanmedian_outNamedTuple1, &desc); nanmedian_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &nanmedian_outNamedTuple1;
}
PyTypeObject* get_qr_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"Q", ""}, {"R", ""}, {nullptr} };
  static PyTypeObject qr_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.qr_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&qr_outNamedTuple, &desc); qr_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &qr_outNamedTuple;
}
PyTypeObject* get_qr_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"Q", ""}, {"R", ""}, {nullptr} };
  static PyTypeObject qrNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.qr", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&qrNamedTuple1, &desc); qrNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &qrNamedTuple1;
}
PyTypeObject* get_slogdet_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"sign", ""}, {"logabsdet", ""}, {nullptr} };
  static PyTypeObject slogdetNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.slogdet", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&slogdetNamedTuple, &desc); slogdetNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &slogdetNamedTuple;
}
PyTypeObject* get_solve_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"LU", ""}, {nullptr} };
  static PyTypeObject solveNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.solve", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&solveNamedTuple, &desc); solveNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &solveNamedTuple;
}
PyTypeObject* get_solve_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"LU", ""}, {nullptr} };
  static PyTypeObject solve_outNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.solve_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&solve_outNamedTuple1, &desc); solve_outNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &solve_outNamedTuple1;
}
PyTypeObject* get_sort_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject sort_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.sort_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&sort_outNamedTuple, &desc); sort_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &sort_outNamedTuple;
}
PyTypeObject* get_sort_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject sortNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.sort", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&sortNamedTuple1, &desc); sortNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &sortNamedTuple1;
}
PyTypeObject* get_svd_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"U", ""}, {"S", ""}, {"V", ""}, {nullptr} };
  static PyTypeObject svd_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.svd_out", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&svd_outNamedTuple, &desc); svd_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &svd_outNamedTuple;
}
PyTypeObject* get_svd_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"U", ""}, {"S", ""}, {"V", ""}, {nullptr} };
  static PyTypeObject svdNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.svd", nullptr, NamedTuple_fields, 3 };
  if (!is_initialized) { PyStructSequence_InitType(&svdNamedTuple1, &desc); svdNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &svdNamedTuple1;
}
PyTypeObject* get_symeig_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject symeig_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.symeig_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&symeig_outNamedTuple, &desc); symeig_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &symeig_outNamedTuple;
}
PyTypeObject* get_symeig_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"eigenvalues", ""}, {"eigenvectors", ""}, {nullptr} };
  static PyTypeObject symeigNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.symeig", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&symeigNamedTuple1, &desc); symeigNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &symeigNamedTuple1;
}
PyTypeObject* get_topk_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject topk_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.topk_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&topk_outNamedTuple, &desc); topk_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &topk_outNamedTuple;
}
PyTypeObject* get_topk_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"values", ""}, {"indices", ""}, {nullptr} };
  static PyTypeObject topkNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.topk", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&topkNamedTuple1, &desc); topkNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &topkNamedTuple1;
}
PyTypeObject* get_triangular_solve_out_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"cloned_coefficient", ""}, {nullptr} };
  static PyTypeObject triangular_solve_outNamedTuple; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.triangular_solve_out", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&triangular_solve_outNamedTuple, &desc); triangular_solve_outNamedTuple.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &triangular_solve_outNamedTuple;
}
PyTypeObject* get_triangular_solve_namedtuple() {
  static PyStructSequence_Field NamedTuple_fields[] = { {"solution", ""}, {"cloned_coefficient", ""}, {nullptr} };
  static PyTypeObject triangular_solveNamedTuple1; static bool is_initialized = false;
  static PyStructSequence_Desc desc = { "torch.return_types.triangular_solve", nullptr, NamedTuple_fields, 2 };
  if (!is_initialized) { PyStructSequence_InitType(&triangular_solveNamedTuple1, &desc); triangular_solveNamedTuple1.tp_repr = (reprfunc)torch::utils::returned_structseq_repr; is_initialized = true; }
  return &triangular_solveNamedTuple1;
}

} // anonymous namespace

namespace torch { namespace autograd {

std::map<std::string, PyTypeObject*>& get_namedtuple_types_map() {
  // [NOTE] Non-global map
  // This map calls Python functions during its initialization.
  // If it is a global static variable and in case it is loaded
  // before Python interpreter is ready, then the calls it makes during
  // initialization will SEGFAULT.
  // To avoid this we make it function static variable so that it is
  // initialized only after the Python interpreter is ready.
  static std::map<std::string, PyTypeObject*> namedtuple_types_map = {
    {"_det_lu_based_helper", get__det_lu_based_helper_namedtuple()},
    {"_fake_quantize_per_tensor_affine_cachemask_tensor_qparams", get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_namedtuple()},
    {"_fused_moving_avg_obs_fq_helper", get__fused_moving_avg_obs_fq_helper_namedtuple()},
    {"_lu_with_info", get__lu_with_info_namedtuple()},
    {"_unpack_dual", get__unpack_dual_namedtuple()},
    {"aminmax", get_aminmax_namedtuple()},
    {"aminmax_out", get_aminmax_out_namedtuple()},
    {"cummax", get_cummax_namedtuple()},
    {"cummax_out", get_cummax_out_namedtuple()},
    {"cummin", get_cummin_namedtuple()},
    {"cummin_out", get_cummin_out_namedtuple()},
    {"eig_out", get_eig_out_namedtuple()},
    {"eig", get_eig_namedtuple()},
    {"frexp", get_frexp_namedtuple()},
    {"frexp_out", get_frexp_out_namedtuple()},
    {"geqrf_out", get_geqrf_out_namedtuple()},
    {"geqrf", get_geqrf_namedtuple()},
    {"histogram_out", get_histogram_out_namedtuple()},
    {"histogram", get_histogram_namedtuple()},
    {"kthvalue", get_kthvalue_namedtuple()},
    {"kthvalue_out", get_kthvalue_out_namedtuple()},
    {"linalg_cholesky_ex", get_linalg_cholesky_ex_namedtuple()},
    {"linalg_cholesky_ex_out", get_linalg_cholesky_ex_out_namedtuple()},
    {"linalg_eig", get_linalg_eig_namedtuple()},
    {"linalg_eig_out", get_linalg_eig_out_namedtuple()},
    {"linalg_eigh", get_linalg_eigh_namedtuple()},
    {"linalg_eigh_out", get_linalg_eigh_out_namedtuple()},
    {"linalg_inv_ex", get_linalg_inv_ex_namedtuple()},
    {"linalg_inv_ex_out", get_linalg_inv_ex_out_namedtuple()},
    {"linalg_lstsq", get_linalg_lstsq_namedtuple()},
    {"linalg_lstsq_out", get_linalg_lstsq_out_namedtuple()},
    {"linalg_qr", get_linalg_qr_namedtuple()},
    {"linalg_qr_out", get_linalg_qr_out_namedtuple()},
    {"linalg_slogdet", get_linalg_slogdet_namedtuple()},
    {"linalg_slogdet_out", get_linalg_slogdet_out_namedtuple()},
    {"linalg_svd_out", get_linalg_svd_out_namedtuple()},
    {"linalg_svd", get_linalg_svd_namedtuple()},
    {"lstsq_out", get_lstsq_out_namedtuple()},
    {"lstsq", get_lstsq_namedtuple()},
    {"lu_unpack", get_lu_unpack_namedtuple()},
    {"lu_unpack_out", get_lu_unpack_out_namedtuple()},
    {"max", get_max_namedtuple()},
    {"max_out", get_max_out_namedtuple()},
    {"median", get_median_namedtuple()},
    {"median_out", get_median_out_namedtuple()},
    {"min", get_min_namedtuple()},
    {"min_out", get_min_out_namedtuple()},
    {"mode", get_mode_namedtuple()},
    {"mode_out", get_mode_out_namedtuple()},
    {"nanmedian", get_nanmedian_namedtuple()},
    {"nanmedian_out", get_nanmedian_out_namedtuple()},
    {"qr_out", get_qr_out_namedtuple()},
    {"qr", get_qr_namedtuple()},
    {"slogdet", get_slogdet_namedtuple()},
    {"solve", get_solve_namedtuple()},
    {"solve_out", get_solve_out_namedtuple()},
    {"sort_out", get_sort_out_namedtuple()},
    {"sort", get_sort_namedtuple()},
    {"svd_out", get_svd_out_namedtuple()},
    {"svd", get_svd_namedtuple()},
    {"symeig_out", get_symeig_out_namedtuple()},
    {"symeig", get_symeig_namedtuple()},
    {"topk_out", get_topk_out_namedtuple()},
    {"topk", get_topk_namedtuple()},
    {"triangular_solve_out", get_triangular_solve_out_namedtuple()},
    {"triangular_solve", get_triangular_solve_namedtuple()},
  };
  return namedtuple_types_map;
}

PyTypeObject* get_namedtuple(std::string name) {
  static auto& namedtuple_types_map = get_namedtuple_types_map();
  return namedtuple_types_map[name];
}

void initReturnTypes(PyObject* module) {
  static struct PyModuleDef def = {
      PyModuleDef_HEAD_INIT, "torch._C._return_types", nullptr, -1, {}};
  PyObject* return_types_module = PyModule_Create(&def);
  if (!return_types_module) {
    throw python_error();
  }
  for (const auto& return_type_pair : get_namedtuple_types_map()) {
    // hold onto the TypeObject for the unlikely case of user
    // deleting or overriding it.
    Py_INCREF(return_type_pair.second);
    if (PyModule_AddObject(
            return_types_module,
            return_type_pair.first.c_str(),
            (PyObject*)return_type_pair.second) != 0) {
      Py_DECREF((PyObject*)return_type_pair.second);
      throw python_error();
    }
  }
  // steals a reference to return_types on success
  if (PyModule_AddObject(module, "_return_types", return_types_module) != 0) {
    Py_DECREF(return_types_module);
    throw python_error();
  }
}

} // namespace autograd
} // namespace torch
```

</details>

<details>
<summary>Eg. updated call in other python_*_functions</summary>

```cpp
// linalg_cholesky_ex
static PyObject * THPVariable_linalg_cholesky_ex(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PyTypeObject* NamedTuple = get_namedtuple("linalg_cholesky_ex");
  static PyTypeObject* NamedTuple1 = get_namedtuple("linalg_cholesky_ex_out");
  static PythonArgParser parser({
    "linalg_cholesky_ex(Tensor input, *, bool upper=False, bool check_errors=False, TensorList[2] out=None)",
  }, /*traceable=*/true);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
  if(_r.has_torch_function()) {
    return handle_torch_function(_r, nullptr, args, kwargs, THPLinalgVariableFunctionsModule, "torch.linalg");
  }
  if (_r.isNone(3)) {
    // aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
    auto dispatch_linalg_cholesky_ex = [](const at::Tensor & self, bool upper, bool check_errors) -> ::std::tuple<at::Tensor,at::Tensor> {
      pybind11::gil_scoped_release no_gil;
      return at::linalg_cholesky_ex(self, upper, check_errors);
    };
    return wrap(NamedTuple, dispatch_linalg_cholesky_ex(_r.tensor(0), _r.toBool(1), _r.toBool(2)));
  } else {
    // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
    auto out = _r.tensorlist_n<2>(3);
    auto dispatch_linalg_cholesky_ex_out = [](at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper, bool check_errors) -> ::std::tuple<at::Tensor,at::Tensor> {
      pybind11::gil_scoped_release no_gil;
      return at::linalg_cholesky_ex_out(L, info, self, upper, check_errors);
    };
    return wrap(NamedTuple1, dispatch_linalg_cholesky_ex_out(out[0], out[1], _r.tensor(0), _r.toBool(1), _r.toBool(2)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
```

</details>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/66614
Reviewed By: H-Huang
Differential Revision: D32741134
Pulled By: zou3519
fbshipit-source-id: 27bada30d20e66333ca1be1775608d9f0cbf9f59
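The `[NOTE] Non-global map` above leans on a standard C++ property: a function-local static is initialized on first use rather than at library-load time, which is what keeps the `PyStructSequence_InitType` calls from running before the Python interpreter exists. A minimal, self-contained sketch of that initialization-order behavior (all names here are hypothetical, not part of the PR):

```cpp
#include <cstdio>

// A function-local static runs its initializer only when the function is
// first called, not when the shared library is loaded.
int& lazy_counter() {
  static int counter = [] { std::puts("initialized on first use"); return 0; }();
  return counter;
}

int main() {
  lazy_counter()++;                      // prints the message once, then increments
  lazy_counter()++;                      // no message this time
  std::printf("%d\n", lazy_counter());   // prints 2
}
```

By the time anything calls `get_namedtuple_types_map()`, the interpreter is up, so building the map (which calls into CPython) is safe; later calls just return the already-built map, which is also why `get_namedtuple()` can cache it by reference.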
1232 lines · 47 KiB · C++
// ${generated_comment}

#include <Python.h>

// Undefine the copysign macro so that at::copysign works as intended with MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER

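// NOTE: pyconfig.h historically defines copysign as a macro for MSVC
// (roughly `#define copysign _copysign`), which would rewrite the tokens of
// at::copysign(a, b) into at::_copysign(a, b) and break compilation; the
// #undef above restores the real identifier.
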
#include "torch/csrc/DynamicTypes.h"
|
|
#include "torch/csrc/Exceptions.h"
|
|
#include "torch/csrc/Size.h"
|
|
#include "torch/csrc/autograd/generated/VariableType.h"
|
|
#include "torch/csrc/autograd/python_variable.h"
|
|
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
|
|
#include "torch/csrc/autograd/utils/error_messages.h"
|
|
#include "torch/csrc/autograd/utils/wrap_outputs.h"
|
|
#include "torch/csrc/jit/frontend/tracer.h"
|
|
#ifdef USE_CUDA
|
|
#include "torch/csrc/cuda/Event.h"
|
|
#endif
|
|
#include "torch/csrc/utils/cuda_lazy_init.h"
|
|
#include "torch/csrc/utils/object_ptr.h"
|
|
#include "torch/csrc/utils/pycfunction_helpers.h"
|
|
#include "torch/csrc/utils/python_arg_parser.h"
|
|
#include "torch/csrc/utils/python_numbers.h"
|
|
#include "torch/csrc/utils/python_strings.h"
|
|
#include "torch/csrc/utils/python_tuples.h"
|
|
#include "torch/csrc/utils/tensor_apply.h"
|
|
#include "torch/csrc/utils/tensor_list.h"
|
|
#include "torch/csrc/utils/tensor_new.h"
|
|
#include "torch/csrc/utils/tensor_numpy.h"
|
|
#include "torch/csrc/utils/tensor_types.h"
|
|
#include "torch/csrc/utils/structseq.h"
|
|
#include "torch/csrc/autograd/python_return_types.h"
|
|
|
|
#include <ATen/ATen.h>
|
|
#include "c10/util/Optional.h"
|
|
#include "c10/core/Stream.h"
|
|
|
|
#include <stdexcept>
|
|
|
|
using at::DeviceGuard;
|
|
using at::device_of;
|
|
using at::OptionalDeviceGuard;
|
|
using at::Backend;
|
|
using at::Scalar;
|
|
using at::ScalarType;
|
|
using at::Tensor;
|
|
using c10::Stream;
|
|
using namespace torch::autograd::utils;
|
|
|
|
namespace torch { namespace autograd {
|
|
|
|
static PyObject * THPVariable__is_view(PyObject *self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "_is_view", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (self_.is_view()) {
    Py_RETURN_TRUE;
  } else {
    Py_RETURN_FALSE;
  }
  END_HANDLE_TH_ERRORS
}

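// NOTE: the check_has_torch_function / handle_torch_function pair above
// implements the __torch_function__ override protocol: when the tensor's
// Python type overrides __torch_function__, the call is forwarded to that
// override instead of the C++ implementation. The same guard appears at the
// top of every method binding in this file.
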
// implemented on the python object because there is no support for first-class
// functions in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    auto args = py::make_tuple(py::handle(arg));
    return handle_torch_function(self, "apply_", args.ptr());
  }
  auto& self_ = THPVariable_Unpack(self);
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call apply_() on Variable that requires grad. Use "
        "var.detach().apply_() instead.");
  }
  return THPVariable_Wrap(torch::utils::apply_(self_, arg));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "size(int64_t dim)",
    "size()",
    "size(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return wrap(self_.size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python.
    return THPSize_New(self_);
  }
  else if (r.idx == 2) {
    if (jit::tracer::isTracing()) {
      TORCH_INTERNAL_ASSERT(false, "NYI: Named tensors w/ JIT");
    }
    return wrap(self_.size(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "stride(int64_t dim)",
    "stride()",
    "stride(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0) {
    return wrap(self_.stride(r.toInt64(0)));
  } else if (r.idx == 1) {
    // yes, this is called strides in ATen.
    IntArrayRef strides = self_.strides();
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python
    return THPUtils_packInt64Array(strides.size(), strides.data());
  }
  else if (r.idx == 2) {
    return wrap(self_.stride(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

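// NOTE: the "normal wrapping" mentioned in size() and stride() above is the
// generic wrap() overload set, which cannot distinguish the two Python
// targets of an IntArrayRef. Hence size() builds a torch.Size (a tuple
// subclass) via THPSize_New, while stride() deliberately packs a plain tuple
// of ints with THPUtils_packInt64Array.
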
// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "get_device", args, nullptr);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.get_device());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_has_names(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "has_names", args);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.has_names());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_data_ptr(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "data_ptr", args);
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.data_ptr());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self_)) {
    return handle_torch_function(self_, "storage_offset");
  }
  auto& self = THPVariable_Unpack(self_);
  return wrap(self.storage_offset());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "dim", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  return THPUtils_packInt64(self_.dim());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_numel(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "numel", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  return THPUtils_packInt64(self_.numel());
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_contiguous(const Tensor & self, at::MemoryFormat memory_format) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.contiguous(memory_format);
}

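// NOTE: every dispatch_* helper in this file follows the same prologue before
// calling into ATen: pybind11::gil_scoped_release drops the GIL so other
// Python threads can run while the kernel executes, and OptionalDeviceGuard
// switches to self's device for the duration of the call.
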
static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "contiguous(*, MemoryFormat memory_format=contiguous_format)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto& self_ = THPVariable_Unpack(self);
  auto memory_format = r.memoryformat(0);
  // avoids touching the GIL or current device if self is already contiguous
  if (self_.is_contiguous(memory_format)) {
    // NOTE: this logic is duplicated from VariableType.cpp. Since we need to
    // record this call to contiguous() in the trace regardless of whether
    // we actually call contiguous here, we need to record this information
    // manually.
    if (jit::tracer::isTracing()) {
      auto tracer_state = jit::tracer::getTracingState();
      auto node = tracer_state->graph->create(jit::aten::contiguous, /*num_outputs=*/0);
      jit::tracer::recordSourceLocation(node);
      jit::tracer::addInputs(node, "self", self_);
      jit::tracer::addInputs(node, "memory_format", memory_format);
      tracer_state->graph->insertNode(node);
      jit::tracer::addOutput(node, self_);
    }
    Py_INCREF(self);
    return self;
  }
  return THPVariable_Wrap(dispatch_contiguous(self_, memory_format));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_copy_(const Tensor & self, const Tensor & other, bool non_blocking) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.copy_(other, non_blocking);
}

static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "copy_(Tensor other, bool non_blocking=False)",
    "copy_(Tensor other, bool async=False)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
  END_HANDLE_TH_ERRORS
}

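// NOTE: the "|deprecated" overload above keeps the older keyword spelling
// copy_(other, async=...) parseable; both signatures bind the flag to argument
// slot 1, so the same r.toBool(1) call serves either spelling.
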
static double dispatch_to_CDouble(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<double>();
}

static c10::complex<double> dispatch_to_CComplexDouble(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<c10::complex<double>>();
}

static int64_t dispatch_to_CLong(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<int64_t>();
}

static bool dispatch_to_Bool(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<bool>();
}

static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__float__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return wrap(dispatch_to_CDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_complex_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__complex__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python complex", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return wrap(dispatch_to_CComplexDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__int__", args);
  }
  jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  if (isFloatingType(self_.scalar_type())) {
    // we can't dispatch to item<int64_t> here because we want to avoid ATen overflow checks;
    // the python integral type (long in python2) can't overflow.
    return THPUtils_packDoubleAsInt(dispatch_to_CDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}

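// NOTE: for floating tensors, __int__ therefore converts through a C double
// and THPUtils_packDoubleAsInt rather than item<int64_t>(), so a value outside
// int64 range becomes a (possibly huge) arbitrary-precision Python int instead
// of tripping ATen's overflow check.
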
// This is the __index__ function in Python which is similar to __int__, but
// is called when the tensor is used as an index (e.g. in slicing).
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__index__", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  // TODO: change the condition to `self_.dim() != 0` once we expose scalars
  // in PyTorch.
  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true) || self_.numel() != 1) {
    throw TypeError("only integer tensors of a single element can be converted to an index");
  }
  return wrap(dispatch_to_CLong(self_));
  END_HANDLE_TH_ERRORS
}

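// Illustrative consequence of __index__ (a sketch, not generated code): a
// single-element integer or bool tensor works anywhere Python expects an
// index, e.g. `some_list[torch.tensor(2)]` or `range(10)[torch.tensor(3)]`,
// while a float tensor trips the TypeError above.
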
static Tensor dispatch_invert(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.bitwise_not();
}

static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "__invert__", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  if (!isIntegralType(self_.scalar_type(), /*includeBool=*/true)) {
    throw TypeError("~ (operator.invert) is only implemented on integer and Boolean-type tensors");
  }
  return THPVariable_Wrap(dispatch_invert(self_));
  END_HANDLE_TH_ERRORS
}

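// Illustrative behavior (a sketch, not generated code): `~t` is equivalent to
// `t.bitwise_not()`; on bool tensors this is elementwise logical negation,
// e.g. `~torch.tensor([True, False])` -> tensor([False, True]).
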
static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of aten::to
  // is different with respect to TensorOptions fields that are not present: aten::to inherits fields that
  // are missing from the self argument while the tracer assumes that they should be populated with the
  // default values (e.g. float for scalar type). By explicitly copying over the tensor options here we
  // fully specify all tensor options and thus record the proper trace.
  return self.to(self.options().device(device).memory_format(optional_memory_format), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  return self.to(self.options().memory_format(optional_memory_format), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // TODO: Make this call the TensorOptions version, maybe?
  return self.to(dtype, non_blocking, copy, optional_memory_format);
}

static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
  pybind11::gil_scoped_release no_gil;
  // TODO: Make this call the TensorOptions version, maybe?
  return self.to(device, dtype, non_blocking, copy, optional_memory_format);
}

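// The four dispatch_to overloads above correspond to the ways Tensor.to can
// be called from Python (illustrative mapping, assuming the usual Tensor.to
// signature; not part of the generated file):
//
//   t.to(memory_format=fmt)   -> dispatch_to(self, non_blocking, copy, fmt)
//   t.to(dtype)               -> dispatch_to(self, dtype, ...)
//   t.to(device)              -> dispatch_to(self, device, ...)
//   t.to(device, dtype)       -> dispatch_to(self, device, dtype, ...)
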
static PyObject * THPVariable_cpu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cpu(*, MemoryFormat? memory_format=None)"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_nonzero(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.nonzero();
}

static std::vector<Tensor> dispatch_nonzero_numpy(const Tensor & self) {
  pybind11::gil_scoped_release no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.nonzero_numpy();
}

static PyObject * THPVariable_nonzero(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "nonzero()",
    "nonzero(*, bool as_tuple)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0 || (r.idx == 1 && !r.toBool(0))) {
    return wrap(dispatch_nonzero(self_));
  } else {
    return wrap(dispatch_nonzero_numpy(self_));
  }
  END_HANDLE_TH_ERRORS
}

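// Illustrative behavior of the two nonzero signatures (a sketch, not
// generated code): `t.nonzero()` returns a single 2-D tensor of index rows,
// while `t.nonzero(as_tuple=True)` returns one 1-D index tensor per
// dimension, in the style of numpy.nonzero.
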
static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cuda(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "cuda(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
  torch::utils::cuda_lazy_init();
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

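// Illustrative equivalence (a sketch, not generated code): `t.cuda()` behaves
// like `t.to(torch.device('cuda'))` with copy=False; both paths invoke
// torch::utils::cuda_lazy_init() before dispatching, so the CUDA runtime is
// initialized on first use.
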
static PyObject * THPVariable_xpu(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "xpu(Device? device=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "xpu(Device? device=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto device = r.isNone(0) ? at::Device(at::DeviceType::XPU) : r.device(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  TORCH_CHECK(device.is_xpu(), "Invalid device, must be xpu device");
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType, c10::optional<c10::MemoryFormat> optional_memory_format) {
  HANDLE_TH_ERRORS
  auto& self_ = THPVariable_Unpack(self);
  return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false, optional_memory_format));
  END_HANDLE_TH_ERRORS
}

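// All of the dtype-named conversion methods below (byte, char, double, float,
// cdouble, cfloat, half, int, long, short, bool, bfloat16) funnel through
// THPVariable_to_type. Illustrative equivalence (a sketch, not generated
// code): `t.float(memory_format=fmt)` behaves like
// `t.to(torch.float32, memory_format=fmt)` with non_blocking=False and
// copy=False.
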
static PyObject * THPVariable_byte(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "byte(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Byte, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_char(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "char(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Char, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_double(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "double(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Double, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_float(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "float(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Float, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cdouble(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cdouble(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::ComplexDouble, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cfloat(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cfloat(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::ComplexFloat, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_half(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "half(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Half, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_int(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "int(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Int, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_long(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "long(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Long, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_short(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "short(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Short, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_bool(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "bool(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::Bool, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_bfloat16(PyObject* self, PyObject* args, PyObject* kwargs) {
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "bfloat16(*, MemoryFormat? memory_format=None)"
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto opt_memory_format = r.memoryformatOptional(0);
  return THPVariable_to_type(self, ScalarType::BFloat16, opt_memory_format);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "element_size", args);
  }
  auto& self_ = THPVariable_Unpack(self);
  return THPUtils_packInt64(self_.element_size());
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because PyObjects are not declarable in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_numpy(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "numpy");
  }
  jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  return torch::utils::tensor_to_numpy(self_);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "requires_grad_(bool requires_grad=True)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  auto requires_grad = r.toBool(0);
  // Should we throw if requires_grad is true? `var.requires_grad = True` throws here,
  // but it's nice to let this be a no-op.
  if (!self_.is_leaf() && !requires_grad) {
    throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
  }
  if (requires_grad && !isDifferentiableType(at::typeMetaToScalarType(self_.dtype()))) {
    throw std::runtime_error("only Tensors of floating point dtype can require gradients");
  }
  self_.set_requires_grad(requires_grad);
  return THPVariable_Wrap(self_);
  END_HANDLE_TH_ERRORS
}

inline bool dispatch_is_contiguous(const Tensor & self, MemoryFormat memory_format) {
  return self.is_contiguous(memory_format);
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "is_contiguous(*, MemoryFormat memory_format=contiguous_format)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(self_, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self_, args, kwargs, PyObject_Type(self_), "torch.Tensor");
  }

  auto memory_format = r.memoryformat(0);
  auto& self = THPVariable_Unpack(self_);
  return wrap(dispatch_is_contiguous(self, memory_format));
  END_HANDLE_TH_ERRORS
}

// implemented on the python object to avoid dispatch overhead
static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "item", args);
  }
  jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = THPVariable_Unpack(self);
  if (self_.is_floating_point()) {
    return wrap(dispatch_to_CDouble(self_));
  } else if (self_.is_complex()) {
    return wrap(dispatch_to_CComplexDouble(self_));
  } else if (self_.scalar_type() == ScalarType::Bool) {
    return wrap(dispatch_to_Bool(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}

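// Illustrative mapping used by item() above (a summary sketch, not generated
// code):
//   floating tensors  -> Python float   (dispatch_to_CDouble)
//   complex tensors   -> Python complex (dispatch_to_CComplexDouble)
//   bool tensors      -> Python bool    (dispatch_to_Bool)
//   everything else   -> Python int     (dispatch_to_CLong)
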
// implemented on the python object because first-class functions are not supported in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  Variable other = r.tensor(0);
  if (self_.requires_grad() || other.requires_grad()) {
    throw std::runtime_error(
        "Can't call map_() on Variable that requires grad. Use "
        "var.detach().map_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
  END_HANDLE_TH_ERRORS
}

// implemented on the python object because first-class functions are not supported in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  Variable x = r.tensor(0);
  Variable y = r.tensor(1);
  if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
    throw std::runtime_error(
        "Can't call map2_() on Variable that requires grad. Use "
        "var.detach().map2_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
  END_HANDLE_TH_ERRORS
}

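// Illustrative usage of map_/map2_ (a sketch, not generated code): because
// both refuse tensors that require grad, callers detach first, e.g.
// `a.detach().map_(b.detach(), lambda x, y: x + y)`, which applies the Python
// callable elementwise and writes the result back into `a`.
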
static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "new", args, kwargs);
  }
  auto& self_ = THPVariable_Unpack(self);
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::legacy_tensor_new(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "new_tensor", args, kwargs);
  }
  auto& self_ = THPVariable_Unpack(self);
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_tensor(legacyExtractDispatchKey(self_), self_.scalar_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "storage");
  }
  auto& self_ = THPVariable_Unpack(self);
  return createPyObject(self_.storage(), self_.dtype());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
    "to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
    "to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
  });
  ParsedArgs<5> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);
  if (r.has_torch_function()) {
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  auto parsed = parse_to_conversion(r, /*allow_copy*/ true);
  auto& device = std::get<0>(parsed);
  auto& scalarType = std::get<1>(parsed);
  auto non_blocking = std::get<2>(parsed);
  auto copy = std::get<3>(parsed);
  auto opt_memory_format = std::get<4>(parsed);
  auto& self_ = THPVariable_Unpack(self);
  if (device && device->is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  if (!device && !scalarType && !copy && !opt_memory_format.has_value()) {
    Py_INCREF(self);
    return self;
  } else if (!device && !scalarType) {
    return THPVariable_Wrap(
        dispatch_to(self_, non_blocking, copy, opt_memory_format));
  } else if (!device) {
    return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy, opt_memory_format));
  } else if (!scalarType) {
    return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy, opt_memory_format));
  } else {
    return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy, opt_memory_format));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

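// Decision table for THPVariable_to above (illustrative summary, not
// generated code):
//   no device, no dtype, no copy, no memory_format -> return self unchanged
//   no device, no dtype                            -> options-only overload
//   dtype only                                     -> dtype overload
//   device only                                    -> device overload
//   device and dtype                               -> combined overload
// The trailing Py_RETURN_NONE is unreachable since every branch returns.
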
// implemented on the python object because arbitrarily nested lists are not declarable in native_functions.yaml
// See: ATen/native/README.md for more context
static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  if (check_has_torch_function(self)) {
    return handle_torch_function(self, "tolist", args);
  }
  jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto self_ = THPVariable_Unpack(self);
  return torch::utils::tensor_to_list(self_);
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "type(PyObject* dtype=None, bool non_blocking=False, *, MemoryFormat? memory_format=None)",
    "type(PyObject* dtype=None, bool async=False, *, MemoryFormat? memory_format=None)|deprecated"
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.isNone(0)) {
    return THPUtils_packString(torch::utils::options_to_string(self_.options()));
  }
  auto obj = r.pyobject(0);
  auto opt_memory_format = r.memoryformatOptional(2);
  std::string type_name;
  bool is_dtype = false;
  if (PyType_Check(obj)) {
    if (obj == THPVariableClass) {
      type_name = "torch.Tensor";
    } else {
      type_name = ((PyTypeObject*)obj)->tp_name;
    }
  } else if (THPUtils_checkString(obj)) {
    type_name = THPUtils_unpackString(obj);
  } else if (THPDtype_Check(obj)) {
    is_dtype = true;
  } else {
    throw TypeError("dtype must be a type, str, or dtype object");
  }
  ScalarType scalar_type;
  Device device = self_.device();
  if (is_dtype) {
    scalar_type = r.scalartype(0);
  } else {
    at::TensorOptions options = torch::utils::options_from_string(type_name);
    scalar_type = at::typeMetaToScalarType(options.dtype());
    auto device_type = options.device().type();
    if (device_type != device.type()) {
      device = at::Device(device_type);
    }
  }
  if (device.is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false, opt_memory_format));
  END_HANDLE_TH_ERRORS
}

// generated methods start here

${py_methods}

static PyObject * THPVariable_bool_scalar(PyObject* self, PyObject* args) {
  if (check_has_torch_function(self)) {
    HANDLE_TH_ERRORS
    return handle_torch_function(self, "__bool__", args);
    END_HANDLE_TH_ERRORS
  }
  jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
  return THPVariable_is_nonzero(self, args);
}

// Wrapper converts a raised TypeError into returning NotImplemented
// Used to implement binary arithmetic operators
template <PyObject* (*Func)(PyObject*, PyObject*, PyObject*)>
static PyObject * TypeError_to_NotImplemented_(PyObject* self, PyObject* args, PyObject* kwargs) {

  PyObject* ret = Func(self, args, kwargs);
  if (!ret && PyErr_ExceptionMatches(PyExc_TypeError)) {
    PyErr_Clear();
    Py_INCREF(Py_NotImplemented);
    ret = Py_NotImplemented;
  }
  return ret;
}

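// Returning NotImplemented (instead of letting the TypeError propagate) lets
// the Python interpreter fall back to the reflected operand, so e.g.
// `tensor + obj` can defer to `obj.__radd__(tensor)` when `obj` is a type the
// argument parser rejects. Illustrative note, assuming standard Python
// operator-dispatch semantics.
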
// set_ has to be defined in the template because the c10::Storage object
// does not have a type, and we need to make sure the Python storage object's
// type matches the tensor's type
static PyObject* THPVariable_set_(
    PyObject* self_,
    PyObject* args,
    PyObject* kwargs) {
  HANDLE_TH_ERRORS
  const Tensor& self = THPVariable_Unpack(self_);
  static PythonArgParser parser(
      {
          "set_()",
          "set_(Storage source)",
          "set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride=None)",
          "set_(Tensor source)",
      },
      /*traceable=*/false);

  ParsedArgs<4> parsed_args;
  auto _r = parser.parse(args, kwargs, parsed_args);

  switch (_r.idx) {
    case 0: {
      // aten::set_(Tensor(a!) self) -> Tensor(a!)
      auto dispatch_set_ = [](const Tensor& self) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set_();
      };
      return wrap(dispatch_set_(self));
    }
    case 1: {
      // aten::set_.source_Storage(Tensor(a!) self, Storage source) ->
      // Tensor(a!)
      at::ScalarType storage_scalar_type;
      bool is_typed_storage = true;
      at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage);
      TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage,
        "Expected a Storage of type ", self.dtype(),
        " or an UntypedStorage, but got type ", storage_scalar_type,
        " for argument 1 'storage'");
      auto dispatch_set_ = [](const Tensor& self, Storage source) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set_(source);
      };
      return wrap(dispatch_set_(self, storage));
    }
    case 2: {
      // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage
      // source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
      at::ScalarType storage_scalar_type;
      bool is_typed_storage = true;
      at::Storage storage = _r.storage(0, storage_scalar_type, is_typed_storage);
      TORCH_CHECK(storage_scalar_type == self.dtype() || !is_typed_storage,
        "Expected a Storage of type ", self.dtype(),
        " or an UntypedStorage, but got type ", storage_scalar_type,
        " for argument 1 'storage'");
      auto dispatch_set_ = [](const Tensor& self,
                              Storage source,
                              int64_t storage_offset,
                              IntArrayRef size,
                              IntArrayRef stride) -> Tensor {
        pybind11::gil_scoped_release no_gil;
        return self.set_(source, storage_offset, size, stride);
      };
      return wrap(dispatch_set_(
          self, storage, _r.toInt64(1), _r.intlist(2), _r.intlist(3)));
    }
    case 3: {
      // aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
      auto dispatch_set_ = [](const Tensor& self, const Tensor& source) -> Tensor {
        TORCH_INTERNAL_ASSERT(source.dtype() == self.dtype());
        pybind11::gil_scoped_release no_gil;
        return self.set_(source);
      };
      return wrap(dispatch_set_(self, _r.tensor(0)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

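// Illustrative constraint enforced by the Storage overloads above (a sketch,
// not generated code): `t.set_(s)` accepts an untyped storage, or a typed
// storage whose scalar type matches t.dtype(); a typed storage of any other
// dtype trips the TORCH_CHECK above.
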
// XXX: ops that are bound here are not exposed to the C++ api nor the JIT.
// Any new ops added here should be accompanied with a comment why they are not
// being registered through native_functions.yaml, and be tagged cpp / JIT
PyMethodDef variable_methods[] = {
  // These magic methods are all implemented on the python object so that a
  // raised TypeError can be converted into returning NotImplemented
  {"__add__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__radd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iadd__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_add_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_mul_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__sub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__isub__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_sub_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__div__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__truediv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__floordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__idiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_div_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ifloordiv__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_floor_divide_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imod__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_remainder_>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__eq__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_eq>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ne__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ne>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__lt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_lt>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__le__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_le>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__gt__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_gt>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ge__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_ge>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rand__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_and>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__ror__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_or>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rxor__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_bitwise_xor>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"__bool__", THPVariable_bool_scalar, METH_NOARGS, NULL},
  {"__float__", THPVariable_float_scalar, METH_NOARGS, NULL},
  {"__complex__", THPVariable_complex_scalar, METH_NOARGS, NULL},
  {"__int__", THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__long__", THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__index__", THPVariable_index_scalar, METH_NOARGS, NULL},
  {"__nonzero__", THPVariable_bool_scalar, METH_NOARGS, NULL},
  {"__invert__", THPVariable_invert, METH_NOARGS, NULL},
  {"__matmul__", castPyCFunctionWithKeywords(TypeError_to_NotImplemented_<THPVariable_matmul>), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_is_view", THPVariable__is_view, METH_NOARGS, NULL},
  {"apply_", THPVariable_apply_, METH_O, NULL},
  {"bfloat16", castPyCFunctionWithKeywords(THPVariable_bfloat16), METH_VARARGS | METH_KEYWORDS, NULL},
  {"byte", castPyCFunctionWithKeywords(THPVariable_byte), METH_VARARGS | METH_KEYWORDS, NULL},
  {"char", castPyCFunctionWithKeywords(THPVariable_char), METH_VARARGS | METH_KEYWORDS, NULL},
  {"contiguous", castPyCFunctionWithKeywords(THPVariable_contiguous), METH_VARARGS | METH_KEYWORDS, NULL},
  {"copy_", castPyCFunctionWithKeywords(THPVariable_copy_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cpu", castPyCFunctionWithKeywords(THPVariable_cpu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cuda", castPyCFunctionWithKeywords(THPVariable_cuda), METH_VARARGS | METH_KEYWORDS, NULL},
  {"xpu", castPyCFunctionWithKeywords(THPVariable_xpu), METH_VARARGS | METH_KEYWORDS, NULL},
  {"data_ptr", THPVariable_data_ptr, METH_NOARGS, NULL},
  {"dim", THPVariable_dim, METH_NOARGS, NULL},
  {"has_names", THPVariable_has_names, METH_NOARGS, NULL},
  {"double", castPyCFunctionWithKeywords(THPVariable_double), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cdouble", castPyCFunctionWithKeywords(THPVariable_cdouble), METH_VARARGS | METH_KEYWORDS, NULL},
  {"element_size", THPVariable_element_size, METH_NOARGS, NULL},
  {"float", castPyCFunctionWithKeywords(THPVariable_float), METH_VARARGS | METH_KEYWORDS, NULL},
  {"cfloat", castPyCFunctionWithKeywords(THPVariable_cfloat), METH_VARARGS | METH_KEYWORDS, NULL},
  {"get_device", THPVariable_get_device, METH_NOARGS, NULL},
  {"bool", castPyCFunctionWithKeywords(THPVariable_bool), METH_VARARGS | METH_KEYWORDS, NULL},
  {"half", castPyCFunctionWithKeywords(THPVariable_half), METH_VARARGS | METH_KEYWORDS, NULL},
  {"int", castPyCFunctionWithKeywords(THPVariable_int), METH_VARARGS | METH_KEYWORDS, NULL},
  {"is_contiguous", castPyCFunctionWithKeywords(THPVariable_is_contiguous), METH_VARARGS | METH_KEYWORDS, NULL},
  {"item", THPVariable_item, METH_NOARGS, NULL},
  {"long", castPyCFunctionWithKeywords(THPVariable_long), METH_VARARGS | METH_KEYWORDS, NULL},
  {"map_", castPyCFunctionWithKeywords(THPVariable_map_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"map2_", castPyCFunctionWithKeywords(THPVariable_map2_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"ndimension", THPVariable_dim, METH_NOARGS, NULL},
  {"nelement", THPVariable_numel, METH_NOARGS, NULL},
  {"new", castPyCFunctionWithKeywords(THPVariable_new), METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_tensor", castPyCFunctionWithKeywords(THPVariable_new_tensor), METH_VARARGS | METH_KEYWORDS, NULL},
  {"nonzero", castPyCFunctionWithKeywords(THPVariable_nonzero), METH_VARARGS | METH_KEYWORDS, NULL},
  {"numel", THPVariable_numel, METH_NOARGS, NULL},
  {"numpy", THPVariable_numpy, METH_NOARGS, NULL},
  {"requires_grad_", castPyCFunctionWithKeywords(THPVariable_requires_grad_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"set_", castPyCFunctionWithKeywords(THPVariable_set_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", castPyCFunctionWithKeywords(THPVariable_short), METH_VARARGS | METH_KEYWORDS, NULL},
  {"size", castPyCFunctionWithKeywords(THPVariable_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_storage", THPVariable_storage, METH_NOARGS, NULL},
  {"storage_offset", THPVariable_storage_offset, METH_NOARGS, NULL},
  {"stride", castPyCFunctionWithKeywords(THPVariable_stride), METH_VARARGS | METH_KEYWORDS, NULL},
  {"to", castPyCFunctionWithKeywords(THPVariable_to), METH_VARARGS | METH_KEYWORDS, NULL},
  {"tolist", THPVariable_tolist, METH_NOARGS, NULL},
  {"type", castPyCFunctionWithKeywords(THPVariable_type), METH_VARARGS | METH_KEYWORDS, NULL},
  ${py_method_defs}
  {NULL}
};

}} // namespace torch::autograd