Mirror of https://github.com/zebrajr/pytorch.git, synced 2025-12-07 12:21:27 +01:00.
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/52258 Removes deprecated preprocess method from the backend interface. Preprocessing logic should be now registered along with the backend interface (i.e. PyTorchBackendInterface) via the BackendPreprocessFunction. Also refactored internal dependencies. ghstack-source-id: 121704837 Test Plan: Validates all related tests pass: buck test mode/dev //caffe2/test/cpp/jit:jit -- --exact 'caffe2/test/cpp/jit:jit - BackendTest.ToBackend' python test/test_jit.py TestBackends ===== Glow buck test mode/dev //glow/fb/torch_glow/tests:TorchGlowBackendTests buck test mode/dev //glow/fb/torch_glow/tests:torch_glow_backend_tests Reviewed By: jackm321 Differential Revision: D26443479 fbshipit-source-id: afdc51ae619ced293d10c7a6a12f3530e4c4e53c
85 lines
2.6 KiB
C++
85 lines
2.6 KiB
C++
#include <torch/csrc/jit/backends/backend.h>
|
|
|
|
namespace torch {
|
|
namespace custom_backend {
|
|
// This custom JIT backend is intended to do the minimal amount of work
|
|
// necessary to test that the JIT backend registration endpoints and
|
|
// code generation are working correctly. It is not intended to
|
|
// produce numerically correct results.
|
|
class CustomBackend : public torch::jit::PyTorchBackendInterface {
|
|
public:
|
|
// Constructor.
|
|
explicit CustomBackend() {}
|
|
virtual ~CustomBackend() = default;
|
|
|
|
c10::impl::GenericDict compile(
|
|
c10::IValue processed,
|
|
c10::impl::GenericDict method_compile_spec) override {
|
|
auto spec =
|
|
c10::impl::toTypedDict<std::string, at::IValue>(method_compile_spec);
|
|
|
|
// Return the same string as a value for every key in method_compile_spec.
|
|
auto handles = c10::Dict<std::string, std::string>();
|
|
for (auto it = spec.begin(), end = spec.end(); it != end; ++it) {
|
|
handles.insert(it->key(), it->key());
|
|
}
|
|
return c10::impl::toGenericDict(handles);
|
|
}
|
|
c10::impl::GenericList execute(
|
|
c10::IValue handle,
|
|
c10::impl::GenericList inputs) override {
|
|
TORCH_INTERNAL_ASSERT(handle.isString());
|
|
TORCH_INTERNAL_ASSERT(inputs.size() > 0);
|
|
|
|
c10::List<at::Tensor> output_list;
|
|
|
|
// Implement simple accumulator and negative accumulator (?) ops. Return one
|
|
// or both of them depending on the handle to make sure multiple outputs are
|
|
// handled.
|
|
c10::IValue value = inputs[0];
|
|
at::Tensor accum = value.toTensor();
|
|
accum = accum.clone();
|
|
at::Tensor sub_accum = value.toTensor();
|
|
sub_accum = sub_accum.clone();
|
|
|
|
for (size_t i = 1, e = inputs.size(); i < e; ++i) {
|
|
value = inputs[i];
|
|
accum.add_(value.toTensor(), 1.0);
|
|
sub_accum.sub_(value.toTensor(), 1.0);
|
|
}
|
|
|
|
if (handle.toStringRef() == "accum") {
|
|
output_list.emplace_back(accum);
|
|
} else if (handle.toStringRef() == "sub_accum") {
|
|
output_list.emplace_back(sub_accum);
|
|
} else if (handle.toStringRef() == "forward") {
|
|
output_list.emplace_back(accum);
|
|
output_list.emplace_back(sub_accum);
|
|
}
|
|
|
|
return c10::impl::toList(output_list);
|
|
}
|
|
};
|
|
|
|
// Preprocessing for this backend is a no-op: the module's IValue is handed
// to compile() unchanged. This free function is registered together with
// the backend (via BackendPreprocessFunction) rather than living on the
// interface.
c10::IValue preprocess(
    const torch::jit::Module& mod,
    const c10::Dict<c10::IValue, c10::IValue>& method_compile_spec) {
  // The compile spec is intentionally ignored; the raw module object is
  // the "processed" artifact.
  const auto processed = mod._ivalue();
  return processed;
}
|
|
|
|
// clang-format off
// Export/import annotation for symbols in this library. On Windows the
// symbol is dllexport'ed while building the custom_ops target itself and
// dllimport'ed by its consumers; on other platforms no annotation is needed.
#if defined(_WIN32)
#  if defined(custom_ops_EXPORTS)
#    define CUSTOM_BACKEND_API __declspec(dllexport)
#  else
#    define CUSTOM_BACKEND_API __declspec(dllimport)
#  endif
#else
#  define CUSTOM_BACKEND_API
#endif
// clang-format on

// Returns the name under which this backend is registered with the JIT.
CUSTOM_BACKEND_API std::string getBackendName();
} // namespace custom_backend
} // namespace torch
|