Summary: Adds the ability for all backward functions to accept undefined output gradient arguments. An undefined gradient is a Tensor that was created by the argumentless constructor `at::Tensor()`, for which `tensor.defined() == false`. Also adds new autograd nodes, UndefinedGrad and UndefinedGradBackward, that can be used from within Python code to inject undefined gradients into a backward function. A new test case is added to the backward function unit tests that uses the UndefinedGrad node to ensure that undefined gradients do not break any backward functions.

Closes https://github.com/pytorch/pytorch/issues/33138

Pull Request resolved: https://github.com/pytorch/pytorch/pull/39400

Differential Revision: D21936588

Pulled By: albanD

fbshipit-source-id: eccc5f55c77babe6dadcea4249d0c68a3c64e85d
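For context, a minimal C++ sketch of what "undefined" means here (illustrative only, not part of the change itself): an undefined gradient is simply a default-constructed `at::Tensor`, and after this change every backward function must tolerate receiving one in place of a defined output gradient.

#include <ATen/ATen.h>

void undefined_gradient_example() {
  at::Tensor undef;                    // same as at::Tensor(); holds no storage
  bool is_defined = undef.defined();   // false -- this is an "undefined" gradient
  at::Tensor grad = at::ones({2, 2});  // a normal, defined gradient
  // Backward functions must now accept `undef` anywhere they would
  // accept `grad` as an incoming output gradient.
  (void)is_defined;
  (void)grad;
}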
#include <torch/csrc/autograd/functions/utils.h>

#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>

#include <sstream>
#include <vector>

namespace torch { namespace autograd {

// Wraps raw outputs as Variables and, when any input requires grad, hooks
// them up to a grad_fn built by `ctr`. Undefined outputs are passed through
// as undefined Variables.
variable_list wrap_outputs(const variable_list& inputs, tensor_list&& outputs,
                           const function_constructor& ctr) {
  variable_list result;
  result.reserve(outputs.size());
  if (!any_variable_requires_grad(inputs)) {
    // No input requires grad: wrap outputs without gradient edges.
    for (auto& output : outputs) {
      if (output.defined()) {
        result.push_back(make_variable(output, /*requires_grad=*/false));
      } else {
        result.emplace_back();
      }
    }
  } else {
    auto grad_fn = ctr(collect_next_edges(inputs));
    for (auto& output : outputs) {
      if (output.defined()) {
        auto variable = autograd::make_variable(output, /*requires_grad=*/false);
        autograd::create_gradient_edge(variable, grad_fn);
        result.push_back(std::move(variable));
      } else {
        // Undefined outputs still occupy an input slot on grad_fn so that
        // gradient positions stay aligned with the forward outputs.
        grad_fn->add_input_metadata(Node::undefined_input());
        result.emplace_back();
      }
    }
  }
  return result;
}

// Checks that `inputs` contains exactly `args` entries and that the first
// `required_args` of them are defined, unless `allow_undefined` is set.
void check_input_variables(const char* name, const variable_list& inputs, int args, int required_args, bool allow_undefined) {
  if (required_args == -1) {
    required_args = args;
  }
  if (inputs.size() != (size_t)args) {
    std::stringstream ss;
    ss << name << ": expected " << args << " arguments (got " << inputs.size();
    ss << ")";
    throw std::runtime_error(ss.str());
  }
  for (int i = 0; i < required_args; ++i) {
    if (!inputs[i].defined() && !allow_undefined) {
      std::stringstream ss;
      ss << name << ": expected Tensor at argument " << i << " (got None)";
      throw std::runtime_error(ss.str());
    }
  }
}

}} // namespace torch::autograd
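As a usage sketch (not part of the file above, and assuming only the declarations from `torch/csrc/autograd/functions/utils.h`), this is how `check_input_variables` treats an undefined entry:

#include <torch/csrc/autograd/functions/utils.h>
#include <torch/torch.h>

void check_input_variables_example() {
  using namespace torch::autograd;
  // One defined gradient and one undefined placeholder.
  variable_list grads = {torch::ones({2, 2}), at::Tensor()};
  // Accepted: the undefined entry at index 1 is allowed because
  // allow_undefined is true.
  check_input_variables("example", grads, /*args=*/2, /*required_args=*/-1,
                        /*allow_undefined=*/true);
  // With allow_undefined == false the same call would throw
  // "example: expected Tensor at argument 1 (got None)".
}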