Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/45264

Context for why we are porting to gtest is in https://github.com/pytorch/pytorch/pull/45018. This PR completes the porting process and removes the now-unused files/macros.

Test Plan: Imported from OSS

Reviewed By: ZolotukhinM

Differential Revision: D23901392

Pulled By: suo

fbshipit-source-id: 89526890e1a49462f3f77718f4ee273c5bc578ba
34 lines
1019 B
C++
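For context on what the port looked like: before this change, each C++ JIT test was a plain function declared in a central header and pulled into a hand-rolled runner through a registration macro; afterwards, each test is a self-registering gtest case. A minimal sketch of the shape of that change, assuming the old harness followed the function-plus-registration-macro pattern discussed in the PRs above (the macro and function names here are illustrative, not the exact removed code):

// Old harness (illustrative sketch, not the exact removed code):
// a test function declared in a central tests.h and added to a macro
// list that a custom runner expanded into calls.
void testGraphExecutor();
#define TH_FORALL_TESTS_CUDA(_) \
  _(GraphExecutor)

// gtest port: the TEST macro registers the case automatically, so
// there is no central list to maintain. The _CUDA suffix marks tests
// that require a CUDA device.
#include <gtest/gtest.h>
TEST(GraphExecutorTest, Basic_CUDA) { /* body as in the file below */ }

One practical payoff of the port is standard gtest tooling: a single case can now be run in isolation with, e.g., --gtest_filter=GraphExecutorTest.Basic_CUDA on the test binary.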
#include <gtest/gtest.h>

#include "test/cpp/jit/test_utils.h"
#include "torch/csrc/jit/runtime/graph_executor.h"

namespace torch {
namespace jit {

TEST(GraphExecutorTest, Basic_CUDA) {
  constexpr int batch_size = 4;
  constexpr int input_size = 256;

  int hidden_size = 2 * input_size;

  // Random CUDA inputs for a single LSTM cell: input, hidden state,
  // cell state, and the input-to-hidden / hidden-to-hidden weights.
  auto input = at::randn({batch_size, input_size}, at::kCUDA);
  auto hx = at::randn({batch_size, hidden_size}, at::kCUDA);
  auto cx = at::randn({batch_size, hidden_size}, at::kCUDA);
  auto w_ih = t_def(at::randn({4 * hidden_size, input_size}, at::kCUDA));
  auto w_hh = t_def(at::randn({4 * hidden_size, hidden_size}, at::kCUDA));

  // Run the LSTM graph through the GraphExecutor and check that its
  // two outputs (hidden and cell state) match the eager reference.
  auto g = build_lstm();
  GraphExecutor executor(g, "");
  auto stack = createStack({input, hx, cx, w_ih, w_hh});
  executor.run(stack);
  ASSERT_EQ(stack.size(), 2);
  at::Tensor r0, r1;
  std::tie(r0, r1) = lstm(input, hx, cx, w_ih, w_hh);
  ASSERT_TRUE(almostEqual(stack[0].toTensor(), r0));
  ASSERT_TRUE(almostEqual(stack[1].toTensor(), r1));
}

} // namespace jit
} // namespace torch
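The helpers used above (build_lstm, t_def, createStack, lstm, almostEqual) come from test/cpp/jit/test_utils.h and are not shown on this page. For orientation, the eager reference that the GraphExecutor output is compared against is a single LSTM cell step; a minimal sketch consistent with the shapes in the test, where w_ih is (4*hidden, input) and w_hh is (4*hidden, hidden), might look like the following. This is an assumed reconstruction, not the actual helper from test_utils.cpp:

#include <ATen/ATen.h>
#include <tuple>

// Sketch of a reference single-step LSTM cell matching the test's
// shapes: the fused gate projection comes out as (batch, 4*hidden).
std::tuple<at::Tensor, at::Tensor> lstm_reference(
    const at::Tensor& input,
    const at::Tensor& hx,
    const at::Tensor& cx,
    const at::Tensor& w_ih,
    const at::Tensor& w_hh) {
  auto gates = input.mm(w_ih.t()) + hx.mm(w_hh.t());

  // Split the fused projection into the four LSTM gates.
  auto chunked = gates.chunk(4, /*dim=*/1);
  auto ingate = chunked[0].sigmoid();
  auto forgetgate = chunked[1].sigmoid();
  auto cellgate = chunked[2].tanh();
  auto outgate = chunked[3].sigmoid();

  auto cy = forgetgate * cx + ingate * cellgate;
  auto hy = outgate * cy.tanh();
  return {hy, cy};
}

almostEqual then presumably performs an allclose-style elementwise comparison with a small tolerance, which is the usual way to compare CUDA results against an eager reference.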