Summary:
This PR re-organizes the C++ API `torch::nn` folder structure in the following way:

- Every module in `torch/csrc/api/include/torch/nn/modules/` (except `any.h`, `named_any.h`, `modulelist.h`, `sequential.h`, and `embedding.h`) has a strictly equivalent Python file in `torch/nn/modules/`. For example:
  `torch/csrc/api/include/torch/nn/modules/pooling.h` -> `torch/nn/modules/pooling.py`
  `torch/csrc/api/include/torch/nn/modules/conv.h` -> `torch/nn/modules/conv.py`
  `torch/csrc/api/include/torch/nn/modules/batchnorm.h` -> `torch/nn/modules/batchnorm.py`
  `torch/csrc/api/include/torch/nn/modules/sparse.h` -> `torch/nn/modules/sparse.py`
- Containers such as `any.h`, `named_any.h`, `modulelist.h`, and `sequential.h` are moved into `torch/csrc/api/include/torch/nn/modules/container/`, because their implementations are too long to be combined into one file (the way `torch/nn/modules/container.py` is in the Python API).
- `embedding.h` is not renamed to `sparse.h` yet, because another work stream is pursuing API parity for Embedding and EmbeddingBag, and renaming the file now would cause conflicts. After the embedding API parity work is done, we will rename `embedding.h` to `sparse.h` to match the Python file name, and move the embedding options out to the options/ folder.
- `torch/csrc/api/include/torch/nn/functional/` is added, and its folder structure mirrors that of `torch/csrc/api/include/torch/nn/modules/`. For example, `torch/csrc/api/include/torch/nn/functional/pooling.h` contains the pooling functions, which are then used by the pooling modules in `torch/csrc/api/include/torch/nn/modules/pooling.h`.
- `torch/csrc/api/include/torch/nn/options/` is added, and its folder structure also mirrors that of `torch/csrc/api/include/torch/nn/modules/`. For example, `torch/csrc/api/include/torch/nn/options/pooling.h` contains `MaxPoolOptions`, which is used by both the MaxPool modules in `torch/csrc/api/include/torch/nn/modules/pooling.h` and the max_pool functions in `torch/csrc/api/include/torch/nn/functional/pooling.h` (see the usage sketch below).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/26262
Differential Revision: D17422426
Pulled By: yf225
fbshipit-source-id: c413d2a374ba716dac81db31516619bbd879db7f
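To illustrate how the options/, functional/, and modules/ headers fit together, here is a minimal usage sketch written against current libtorch. Note this is an illustration, not part of this PR: in today's libtorch the functional variant takes `F::MaxPool2dFuncOptions`, whereas at the time of this PR the module and functional forms shared the `MaxPoolOptions` type directly.

#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 1, 8, 8});

  // Module form: the options type comes from torch/nn/options/pooling.h.
  torch::nn::MaxPool2d pool(torch::nn::MaxPool2dOptions(2).stride(2));
  auto y1 = pool(x);

  // Functional form: declared in torch/nn/functional/pooling.h.
  auto y2 = F::max_pool2d(x, F::MaxPool2dFuncOptions(2).stride(2));
  return 0;
}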
102 lines · 2.7 KiB · C++
#include <gtest/gtest.h>

#include <torch/detail/static.h>
#include <torch/csrc/utils/variadic.h>
#include <torch/torch.h>

#include <string>
#include <vector>

// Fallback overload, chosen when T is not a torch::nn module.
template <
    typename T,
    typename = torch::enable_if_t<!torch::detail::is_module<T>::value>>
bool f(T&& m) {
  return false;
}

// SFINAE overload, chosen when T derives from torch::nn::Module.
template <typename T>
torch::detail::enable_if_module_t<T, bool> f(T&& m) {
  return true;
}
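
// For reference, a trait like torch::detail::is_module reduces to a
// base-class check. A minimal sketch (illustrative only; the real definition
// lives in torch/detail/static.h and relies on <type_traits>, which the
// headers above pull in transitively):
template <typename T>
using sketch_is_module =
    std::is_base_of<torch::nn::Module, typename std::decay<T>::type>;

static_assert(
    sketch_is_module<torch::nn::LinearImpl>::value,
    "LinearImpl derives from torch::nn::Module");
static_assert(!sketch_is_module<int>::value, "int is not a module");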

TEST(TestStatic, AllOf) {
  ASSERT_TRUE(torch::all_of<>::value);
  ASSERT_TRUE(torch::all_of<true>::value);
  ASSERT_TRUE((torch::all_of<true, true, true>::value));
  ASSERT_FALSE(torch::all_of<false>::value);
  ASSERT_FALSE((torch::all_of<false, false, false>::value));
  ASSERT_FALSE((torch::all_of<true, true, false>::value));
}

TEST(TestStatic, AnyOf) {
  ASSERT_FALSE(torch::any_of<>::value);
  ASSERT_TRUE(bool((torch::any_of<true>::value)));
  ASSERT_TRUE(bool((torch::any_of<true, true, true>::value)));
  ASSERT_FALSE(bool((torch::any_of<false>::value)));
}
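
// A minimal sketch of how a variadic boolean trait like torch::all_of can be
// implemented via the "bool pack" trick (illustrative only; the real
// definitions live in torch/csrc/utils/variadic.h and may differ). The two
// integer_sequences compare equal exactly when every Bs is true:
template <bool... Bs>
struct sketch_all_of
    : std::is_same<
          std::integer_sequence<bool, true, Bs...>,
          std::integer_sequence<bool, Bs..., true>> {};

// any_of follows by De Morgan: some Bs is true iff not all of them are false.
template <bool... Bs>
struct sketch_any_of
    : std::integral_constant<bool, !sketch_all_of<!Bs...>::value> {};

static_assert(sketch_all_of<>::value, "an empty pack is vacuously all-true");
static_assert(!sketch_any_of<>::value, "an empty pack has no true element");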

TEST(TestStatic, EnableIfModule) {
  ASSERT_TRUE(f(torch::nn::LinearImpl(1, 2)));
  ASSERT_FALSE(f(5));
  ASSERT_TRUE(torch::detail::check_not_lvalue_references<int>());
  ASSERT_TRUE((torch::detail::check_not_lvalue_references<float, int, char>()));
  ASSERT_FALSE(
      (torch::detail::check_not_lvalue_references<float, int&, char>()));
  ASSERT_TRUE(torch::detail::check_not_lvalue_references<std::string>());
  ASSERT_FALSE(torch::detail::check_not_lvalue_references<std::string&>());
}
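
// check_not_lvalue_references<Ts...>() is constexpr-true iff no Ts is an
// lvalue reference. A minimal sketch in terms of the all_of trait exercised
// above (illustrative only; the real helper is in torch/csrc/utils/variadic.h):
template <typename... Ts>
constexpr bool sketch_check_not_lvalue_references() {
  return torch::all_of<!std::is_lvalue_reference<Ts>::value...>::value;
}

static_assert(
    sketch_check_not_lvalue_references<int, char>(),
    "no lvalue references in the pack");
static_assert(
    !sketch_check_not_lvalue_references<int&>(),
    "int& is an lvalue reference");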

// Test modules whose forward() signatures differ in return type and
// parameters; E deliberately has no forward() at all.
struct A : torch::nn::Module {
  int forward() {
    return 5;
  }
};

struct B : torch::nn::Module {
  std::string forward(torch::Tensor tensor) {
    return "";
  }
};

struct C : torch::nn::Module {
  float forward(torch::Tensor& tensor) {
    return 5.0;
  }
};

struct D : torch::nn::Module {
  char forward(torch::Tensor&& tensor) {
    return 'x';
  }
};

struct E : torch::nn::Module {};

// Wrapped in a function because macros don't handle the comma between the
// template arguments to is_same well ...
template <typename Module, typename ExpectedType, typename... Args>
void assert_has_expected_type() {
  using ReturnType =
      typename torch::detail::return_type_of_forward<Module, Args...>::type;
  constexpr bool is_expected_type =
      std::is_same<ReturnType, ExpectedType>::value;
  ASSERT_TRUE(is_expected_type) << Module().name();
}

TEST(TestStatic, ReturnTypeOfForward) {
  assert_has_expected_type<A, int>();
  assert_has_expected_type<B, std::string, torch::Tensor>();
  assert_has_expected_type<C, float, torch::Tensor&>();
  assert_has_expected_type<D, char, torch::Tensor&&>();
  assert_has_expected_type<E, void>();
}
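
// A minimal sketch of the detection idiom behind such a trait (illustrative
// only; the real return_type_of_forward lives in torch/detail/static.h and
// may differ). The call Module::forward(Args...) is probed with std::declval;
// when it is ill-formed (as for E, which has no forward), the primary
// template is selected and yields void:
template <typename... Ts>
struct sketch_make_void {
  using type = void;
};
template <typename... Ts>
using sketch_void_t = typename sketch_make_void<Ts...>::type;

template <typename Module, typename Enable, typename... Args>
struct sketch_return_type_of_forward {
  using type = void;  // no viable Module::forward(Args...)
};

template <typename Module, typename... Args>
struct sketch_return_type_of_forward<
    Module,
    sketch_void_t<decltype(
        std::declval<Module>().forward(std::declval<Args>()...))>,
    Args...> {
  using type =
      decltype(std::declval<Module>().forward(std::declval<Args>()...));
};

static_assert(
    std::is_same<sketch_return_type_of_forward<A, void>::type, int>::value,
    "A::forward() returns int");
static_assert(
    std::is_same<sketch_return_type_of_forward<E, void>::type, void>::value,
    "E has no forward, so the sketch falls back to void");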

TEST(TestStatic, Apply) {
  std::vector<int> v;
  torch::apply([&v](int x) { v.push_back(x); }, 1, 2, 3, 4, 5);
  ASSERT_EQ(v.size(), 5);
  for (size_t i = 0; i < v.size(); ++i) {
    ASSERT_EQ(v.at(i), i + 1);
  }
}
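
// A minimal sketch of such an apply (illustrative only; torch::apply is
// defined in torch/csrc/utils/variadic.h and its real implementation
// differs). Pre-C++17, the pack is expanded into a dummy initializer list,
// which guarantees left-to-right invocation; with C++17 this would simply be
// the fold expression (function(std::forward<Ts>(ts)), ...):
template <typename Function, typename... Ts>
void sketch_apply(Function function, Ts&&... ts) {
  // Invoke `function` once per argument, left to right.
  int unused[] = {0, (function(std::forward<Ts>(ts)), 0)...};
  (void)unused;
}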