#include <array>
#include <memory>
#include <vector>

#include <gtest/gtest.h>

#include "caffe2/core/blob.h"
#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

TEST(MathTest, GemmNoTransNoTrans) {
  DeviceOption option;
  CPUContext cpu_context(option);
  Tensor X(std::vector<int64_t>{5, 10}, CPU);
  Tensor W(std::vector<int64_t>{10, 6}, CPU);
  Tensor Y(std::vector<int64_t>{5, 6}, CPU);
  EXPECT_EQ(X.numel(), 50);
  EXPECT_EQ(W.numel(), 60);
  math::Set<float, CPUContext>(
      X.numel(), 1, X.mutable_data<float>(), &cpu_context);
  math::Set<float, CPUContext>(
      W.numel(), 1, W.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < X.numel(); ++i) {
    CHECK_EQ(X.data<float>()[i], 1);
  }
  for (int i = 0; i < W.numel(); ++i) {
    CHECK_EQ(W.data<float>()[i], 1);
  }

  const float kOne = 1.0;
  const float kPointFive = 0.5;
  const float kZero = 0.0;
  // Y = 1 * X * W + 0 * Y; every entry is a dot product of ten ones.
  math::Gemm<float, CPUContext>(
      CblasNoTrans, CblasNoTrans, 5, 6, 10, kOne, X.data<float>(),
      W.data<float>(), kZero, Y.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 10) << i;
  }
  // Test Accumulate: Y = 1 * X * W + 0.5 * Y = 10 + 5 = 15.
  math::Gemm<float, CPUContext>(
      CblasNoTrans, CblasNoTrans, 5, 6, 10, kOne, X.data<float>(),
      W.data<float>(), kPointFive, Y.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 15) << i;
  }
  // Test Accumulate: Y = 0.5 * X * W + 1 * Y = 5 + 15 = 20.
  math::Gemm<float, CPUContext>(
      CblasNoTrans, CblasNoTrans, 5, 6, 10, kPointFive, X.data<float>(),
      W.data<float>(), kOne, Y.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 20) << i;
  }
}

TEST(MathTest, GemmNoTransTrans) {
  DeviceOption option;
  CPUContext cpu_context(option);
  Tensor X(std::vector<int64_t>{5, 10}, CPU);
  Tensor W(std::vector<int64_t>{6, 10}, CPU);
  Tensor Y(std::vector<int64_t>{5, 6}, CPU);
  EXPECT_EQ(X.numel(), 50);
  EXPECT_EQ(W.numel(), 60);
  math::Set<float, CPUContext>(
      X.numel(), 1, X.mutable_data<float>(), &cpu_context);
  math::Set<float, CPUContext>(
      W.numel(), 1, W.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < X.numel(); ++i) {
    CHECK_EQ(X.data<float>()[i], 1);
  }
  for (int i = 0; i < W.numel(); ++i) {
    CHECK_EQ(W.data<float>()[i], 1);
  }

  const float kOne = 1.0;
  const float kPointFive = 0.5;
  const float kZero = 0.0;
  // W is stored 6x10, so CblasTrans multiplies X (5x10) by W^T (10x6).
  math::Gemm<float, CPUContext>(
      CblasNoTrans, CblasTrans, 5, 6, 10, kOne, X.data<float>(),
      W.data<float>(), kZero, Y.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 10) << i;
  }
  // Test Accumulate: Y = 1 * X * W^T + 0.5 * Y = 10 + 5 = 15.
  math::Gemm<float, CPUContext>(
      CblasNoTrans, CblasTrans, 5, 6, 10, kOne, X.data<float>(),
      W.data<float>(), kPointFive, Y.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 15) << i;
  }
  // Test Accumulate: Y = 0.5 * X * W^T + 1 * Y = 5 + 15 = 20.
  math::Gemm<float, CPUContext>(
      CblasNoTrans, CblasTrans, 5, 6, 10, kPointFive, X.data<float>(),
      W.data<float>(), kOne, Y.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 30);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 20) << i;
  }
}

namespace {

constexpr float kEps = 1e-5;

class GemmBatchedTest : public testing::TestWithParam<std::tuple<bool, bool>> {
 protected:
  void SetUp() override {
    cpu_context_ = make_unique<CPUContext>(option_);
    ReinitializeTensor(
        &X_, std::vector<int64_t>{3, 5, 10}, at::dtype<float>().device(CPU));
    ReinitializeTensor(
        &W_, std::vector<int64_t>{3, 6, 10}, at::dtype<float>().device(CPU));
    ReinitializeTensor(
        &Y_, std::vector<int64_t>{3, 5, 6}, at::dtype<float>().device(CPU));
    math::Set<float, CPUContext>(
        X_.numel(), 1, X_.mutable_data<float>(), cpu_context_.get());
    math::Set<float, CPUContext>(
        W_.numel(), 1, W_.mutable_data<float>(), cpu_context_.get());
    trans_X_ = std::get<0>(GetParam());
    trans_W_ = std::get<1>(GetParam());
  }
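
  // Each of the 3 batch items multiplies a 5x10 slice of X by a 10x6
  // (possibly transposed) slice of W. With all-ones inputs, every output
  // entry is alpha * 10 + beta * (previous Y).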
  void RunGemmBatched(const float alpha, const float beta) {
    const float* X_data = X_.template data<float>();
    const float* W_data = W_.template data<float>();
    float* Y_data = Y_.template mutable_data<float>();
    const int X_stride = 5 * 10;
    const int W_stride = 6 * 10;
    const int Y_stride = 5 * 6;
    std::array<const float*, 3> X_array = {
        X_data, X_data + X_stride, X_data + 2 * X_stride};
    std::array<const float*, 3> W_array = {
        W_data, W_data + W_stride, W_data + 2 * W_stride};
    std::array<float*, 3> Y_array = {
        Y_data, Y_data + Y_stride, Y_data + 2 * Y_stride};
    math::GemmBatched<float, CPUContext>(
        trans_X_ ? CblasTrans : CblasNoTrans,
        trans_W_ ? CblasTrans : CblasNoTrans,
        3, 5, 6, 10, alpha, X_array.data(), W_array.data(), beta,
        Y_array.data(), cpu_context_.get());
  }

  void RunGemmStridedBatched(const float alpha, const float beta) {
    const float* X_data = X_.template data<float>();
    const float* W_data = W_.template data<float>();
    float* Y_data = Y_.template mutable_data<float>();
    const int X_stride = 5 * 10;
    const int W_stride = 6 * 10;
    const int Y_stride = 5 * 6;
    math::GemmStridedBatched<float, CPUContext>(
        trans_X_ ? CblasTrans : CblasNoTrans,
        trans_W_ ? CblasTrans : CblasNoTrans,
        3, 5, 6, 10, alpha, X_data, X_stride, W_data, W_stride, beta,
        Y_data, Y_stride, cpu_context_.get());
  }

  void VerifyOutput(const float value) const {
    for (int i = 0; i < Y_.numel(); ++i) {
      EXPECT_FLOAT_EQ(value, Y_.template data<float>()[i]);
    }
  }

  DeviceOption option_;
  std::unique_ptr<CPUContext> cpu_context_;
  Tensor X_;
  Tensor W_;
  Tensor Y_;
  bool trans_X_;
  bool trans_W_;
};

TEST_P(GemmBatchedTest, GemmBatchedFloatTest) {
  RunGemmBatched(1.0f, 0.0f);
  VerifyOutput(10.0f);
  RunGemmBatched(1.0f, 0.5f);
  VerifyOutput(15.0f);
  RunGemmBatched(0.5f, 1.0f);
  VerifyOutput(20.0f);
}

TEST_P(GemmBatchedTest, GemmStridedBatchedFloatTest) {
  RunGemmStridedBatched(1.0f, 0.0f);
  VerifyOutput(10.0f);
  RunGemmStridedBatched(1.0f, 0.5f);
  VerifyOutput(15.0f);
  RunGemmStridedBatched(0.5f, 1.0f);
  VerifyOutput(20.0f);
}

INSTANTIATE_TEST_CASE_P(
    GemmBatchedTrans,
    GemmBatchedTest,
    testing::Combine(testing::Bool(), testing::Bool()));

} // namespace

TEST(MathTest, GemvNoTrans) {
  DeviceOption option;
  CPUContext cpu_context(option);
  Tensor A(std::vector<int64_t>{5, 10}, CPU);
  Tensor X(std::vector<int64_t>{10}, CPU);
  Tensor Y(std::vector<int64_t>{5}, CPU);
  EXPECT_EQ(A.numel(), 50);
  EXPECT_EQ(X.numel(), 10);
  math::Set<float, CPUContext>(
      A.numel(), 1, A.mutable_data<float>(), &cpu_context);
  math::Set<float, CPUContext>(
      X.numel(), 1, X.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 5);
  for (int i = 0; i < A.numel(); ++i) {
    CHECK_EQ(A.data<float>()[i], 1);
  }
  for (int i = 0; i < X.numel(); ++i) {
    CHECK_EQ(X.data<float>()[i], 1);
  }

  const float kOne = 1.0;
  const float kPointFive = 0.5;
  const float kZero = 0.0;
  math::Gemv<float, CPUContext>(
      CblasNoTrans, 5, 10, kOne, A.data<float>(), X.data<float>(), kZero,
      Y.mutable_data<float>(), &cpu_context);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 10) << i;
  }
  // Test Accumulate: Y = 1 * A * X + 0.5 * Y = 10 + 5 = 15.
  math::Gemv<float, CPUContext>(
      CblasNoTrans, 5, 10, kOne, A.data<float>(), X.data<float>(), kPointFive,
      Y.mutable_data<float>(), &cpu_context);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 15) << i;
  }
  // Test Accumulate: Y = 0.5 * A * X + 1 * Y = 5 + 15 = 20.
  math::Gemv<float, CPUContext>(
      CblasNoTrans, 5, 10, kPointFive, A.data<float>(), X.data<float>(), kOne,
      Y.mutable_data<float>(), &cpu_context);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 20) << i;
  }
}
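
// With CblasTrans, Gemv computes Y = alpha * A^T * X + beta * Y, so the
// 6x10 all-ones A and length-6 all-ones X put 6 in each of Y's 10 entries.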
TEST(MathTest, GemvTrans) {
  DeviceOption option;
  CPUContext cpu_context(option);
  Tensor A(std::vector<int64_t>{6, 10}, CPU);
  Tensor X(std::vector<int64_t>{6}, CPU);
  Tensor Y(std::vector<int64_t>{10}, CPU);
  EXPECT_EQ(A.numel(), 60);
  EXPECT_EQ(X.numel(), 6);
  math::Set<float, CPUContext>(
      A.numel(), 1, A.mutable_data<float>(), &cpu_context);
  math::Set<float, CPUContext>(
      X.numel(), 1, X.mutable_data<float>(), &cpu_context);
  EXPECT_EQ(Y.numel(), 10);
  for (int i = 0; i < A.numel(); ++i) {
    CHECK_EQ(A.data<float>()[i], 1);
  }
  for (int i = 0; i < X.numel(); ++i) {
    CHECK_EQ(X.data<float>()[i], 1);
  }

  const float kOne = 1.0;
  const float kPointFive = 0.5;
  const float kZero = 0.0;
  math::Gemv<float, CPUContext>(
      CblasTrans, 6, 10, kOne, A.data<float>(), X.data<float>(), kZero,
      Y.mutable_data<float>(), &cpu_context);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 6) << i;
  }
  // Test Accumulate: Y = 1 * A^T * X + 0.5 * Y = 6 + 3 = 9.
  math::Gemv<float, CPUContext>(
      CblasTrans, 6, 10, kOne, A.data<float>(), X.data<float>(), kPointFive,
      Y.mutable_data<float>(), &cpu_context);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 9) << i;
  }
  // Test Accumulate: Y = 0.5 * A^T * X + 1 * Y = 3 + 9 = 12.
  math::Gemv<float, CPUContext>(
      CblasTrans, 6, 10, kPointFive, A.data<float>(), X.data<float>(), kOne,
      Y.mutable_data<float>(), &cpu_context);
  for (int i = 0; i < Y.numel(); ++i) {
    CHECK_EQ(Y.data<float>()[i], 12) << i;
  }
}

TEST(MathTest, FloatToHalfConversion) {
  float a = 1.0f;
  float b = 1.75f;
  float c = 128.125f;
  // All three values are exactly representable in fp16, so the round trip
  // through at::Half must be lossless.
  float converted_a = static_cast<float>(at::Half(a));
  float converted_b = static_cast<float>(at::Half(b));
  float converted_c = static_cast<float>(at::Half(c));
  CHECK_EQ(a, converted_a);
  CHECK_EQ(b, converted_b);
  CHECK_EQ(c, converted_c);
}

namespace {

class BroadcastTest : public testing::Test {
 protected:
  void SetUp() override {
    cpu_context_ = make_unique<CPUContext>(option_);
  }

  void RunBroadcastTest(
      const std::vector<int>& X_dims,
      const std::vector<int>& Y_dims,
      const std::vector<float>& X_data,
      const std::vector<float>& Y_data) {
    std::vector<int64_t> X_dims_64;
    std::vector<int64_t> Y_dims_64;
    std::copy(X_dims.cbegin(), X_dims.cend(), std::back_inserter(X_dims_64));
    std::copy(Y_dims.cbegin(), Y_dims.cend(), std::back_inserter(Y_dims_64));
    ReinitializeTensor(&X_, X_dims_64, at::dtype<float>().device(CPU));
    ReinitializeTensor(&Y_, Y_dims_64, at::dtype<float>().device(CPU));
    ASSERT_EQ(X_data.size(), X_.numel());
    cpu_context_->CopyFromCPU<float>(
        X_data.size(), X_data.data(), X_.mutable_data<float>());
    math::Broadcast<float, CPUContext>(
        X_dims.size(), X_dims.data(), Y_dims.size(), Y_dims.data(), 1.0f,
        X_.data<float>(), Y_.mutable_data<float>(), cpu_context_.get());
    ASSERT_EQ(Y_data.size(), Y_.numel());
    for (int i = 0; i < Y_data.size(); ++i) {
      EXPECT_FLOAT_EQ(Y_data[i], Y_.data<float>()[i]);
    }
  }

  DeviceOption option_;
  std::unique_ptr<CPUContext> cpu_context_;
  Tensor X_;
  Tensor Y_;
};

TEST_F(BroadcastTest, BroadcastFloatTest) {
  RunBroadcastTest({2}, {2}, {1.0f, 2.0f}, {1.0f, 2.0f});
  RunBroadcastTest({1}, {2}, {1.0f}, {1.0f, 1.0f});
  RunBroadcastTest({1}, {2, 2}, {1.0f}, {1.0f, 1.0f, 1.0f, 1.0f});
  RunBroadcastTest({2, 1}, {2, 2}, {1.0f, 2.0f}, {1.0f, 1.0f, 2.0f, 2.0f});
  RunBroadcastTest(
      {2, 1},
      {2, 2, 2},
      {1.0f, 2.0f},
      {1.0f, 1.0f, 2.0f, 2.0f, 1.0f, 1.0f, 2.0f, 2.0f});
}

class RandFixedSumTest : public testing::Test {
 protected:
  void SetUp() override {
    cpu_context_ = make_unique<CPUContext>(option_);
  }

  DeviceOption option_;
  std::unique_ptr<CPUContext> cpu_context_;
};

TEST_F(RandFixedSumTest, UpperBound) {
  // Draw 20 ints in [1, 1000] that sum to exactly 1000; the target sum
  // equals the per-element upper bound, exercising the boundary case.
  std::vector<int> l(20);
  math::RandFixedSum<int, CPUContext>(
      20, 1, 1000, 1000, l.data(), cpu_context_.get());
}

} // namespace

} // namespace caffe2