[Vec256][neon] Add neon backend for vec256 (#39341)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/39341

This PR introduces a NEON backend for the Vec256 class for the float datatype.
For now only aarch64 is enabled, due to a few issues with enabling it on
32-bit ARM (aarch32).
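
For reference, a minimal usage sketch of the Vec256<float> interface this backend
implements (the helper function below is hypothetical; it only assumes the
loadu/store/fmadd API shown in the diff):

#include <ATen/cpu/vec256/vec256.h>
#include <cstdint>

// Hypothetical example: dst[i] = src[i] * scale + shift, 8 floats at a time.
void scale_and_shift(const float* src, float* dst, int64_t n,
                     float scale, float shift) {
  using Vec = at::vec256::Vec256<float>;
  const Vec vscale(scale);
  const Vec vshift(shift);
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    // On aarch64 each Vec is backed by two float32x4 NEON registers.
    Vec v = Vec::loadu(src + i);
    at::vec256::fmadd(v, vscale, vshift).store(dst + i);
  }
  for (; i < n; ++i) {
    // Scalar tail for the remaining elements.
    dst[i] = src[i] * scale + shift;
  }
}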

Test Plan:
vec256_test

Imported from OSS

Differential Revision: D21822399

fbshipit-source-id: 3851c4336d93d1c359c85b38cf19904f82bc7b8d
Kimish Patel 2020-07-09 16:20:22 -07:00 committed by Facebook GitHub Bot
parent bddba1e336
commit d6feb6141f
13 changed files with 1367 additions and 5 deletions


@ -133,6 +133,7 @@ cmake_dependent_option(
"NOT BUILD_SHARED_LIBS" OFF)
option(BUILD_TEST "Build C++ test binaries (need gtest and gbenchmark)" OFF)
option(BUILD_MOBILE_BENCHMARKS "Build C++ test binaries for mobile (ARM) targets(need gtest and gbenchmark)" OFF)
option(BUILD_MOBILE_TEST "Build C++ test binaries for mobile (ARM) targets (need gtest)" OFF)
option(BUILD_JNI "Build JNI bindings" OFF)
cmake_dependent_option(
INSTALL_TEST "Install test binaries if BUILD_TEST is on" ON


@ -119,6 +119,7 @@ set(ATen_CUDA_TEST_SRCS ${ATen_CUDA_TEST_SRCS} PARENT_SCOPE)
set(ATen_HIP_TEST_SRCS ${ATen_HIP_TEST_SRCS} PARENT_SCOPE)
set(ATen_VULKAN_TEST_SRCS ${ATen_VULKAN_TEST_SRCS} PARENT_SCOPE)
set(ATen_MOBILE_BENCHMARK_SRCS ${ATen_MOBILE_BENCHMARK_SRCS} PARENT_SCOPE)
set(ATen_MOBILE_TEST_SRCS ${ATen_MOBILE_TEST_SRCS} PARENT_SCOPE)
set(ATen_CPU_INCLUDE ${ATen_CPU_INCLUDE} PARENT_SCOPE)
set(ATen_CUDA_INCLUDE ${ATen_CUDA_INCLUDE} PARENT_SCOPE)
set(ATen_HIP_INCLUDE ${ATen_HIP_INCLUDE} PARENT_SCOPE)


@ -417,6 +417,7 @@ set(ATen_CORE_TEST_SRCS ${ATen_CORE_TEST_SRCS} PARENT_SCOPE)
set(ATen_HIP_TEST_SRCS ${ATen_HIP_TEST_SRCS} PARENT_SCOPE)
set(ATen_VULKAN_TEST_SRCS ${ATen_VULKAN_TEST_SRCS} PARENT_SCOPE)
set(ATen_MOBILE_BENCHMARK_SRCS ${ATen_MOBILE_BENCHMARK_SRCS} PARENT_SCOPE)
set(ATen_MOBILE_TEST_SRCS ${ATen_VEC256_TEST_SRCS} PARENT_SCOPE)
set(ATen_QUANTIZED_TEST_SRCS ${ATen_QUANTIZED_TEST_SRCS} PARENT_SCOPE)
set(ATen_CPU_INCLUDE ${ATen_CPU_INCLUDE} PARENT_SCOPE)
set(ATen_THIRD_PARTY_INCLUDE ${ATen_THIRD_PARTY_INCLUDE} PARENT_SCOPE)


@ -2,6 +2,9 @@
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
/* Clang-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
/* Clang-compatible compiler, targeting arm neon */
#include <arm_neon.h>
#elif defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
@ -14,7 +17,7 @@
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
#elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)


@ -7,6 +7,7 @@
#include <ATen/cpu/vec256/vec256_base.h>
#include <ATen/cpu/vec256/vec256_float.h>
#include <ATen/cpu/vec256/vec256_float_neon.h>
#include <ATen/cpu/vec256/vec256_bfloat16.h>
#include <ATen/cpu/vec256/vec256_double.h>
#include <ATen/cpu/vec256/vec256_int.h>


@ -120,8 +120,7 @@ public:
}
template<typename... Args,
typename = std::enable_if_t<(sizeof...(Args) == size())>>
Vec256(Args... vals) {
values = { vals... };
Vec256(Args... vals) : values{vals...}{
}
// This also implies const T& operator[](int idx) const
inline operator const T*() const {


@ -0,0 +1,647 @@
#pragma once
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
// See Note [Do not compile initializers with AVX]
#include <ATen/cpu/vec256/intrinsics.h>
#include <ATen/cpu/vec256/vec256_base.h>
// Sleef offers vectorized versions of some transcendentals
// such as sin, cos, tan, etc. However, for now we opt for the STL
// versions, since we are not building with Sleef for mobile yet.
namespace at {
namespace vec256 {
// See Note [Acceptable use of anonymous namespace in header]
namespace {
// Right now this contains only the aarch64 implementation.
// aarch32 is not currently supported, for the following two reasons:
// 1. Due to differences in the ISA between aarch32 and aarch64, intrinsics
// that work for aarch64 do not work for aarch32.
// 2. Android NDK r21 has problems compiling for aarch32:
// Clang segfaults.
// https://github.com/android/ndk/issues/1248
// https://bugs.llvm.org/show_bug.cgi?id=45824
// Most likely we will add aarch32 support with inline asm.
#if defined(__aarch64__)
#ifdef __BIG_ENDIAN__
#error "Big endian is not supported."
#endif
template<int index, bool mask_val>
struct BlendRegs {
static float32x4_t impl(
const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
};
template<int index>
struct BlendRegs<index, true>{
static float32x4_t impl(
const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
}
};
template<int index>
struct BlendRegs<index, false>{
static float32x4_t impl(
const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
}
};
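// BlendRegs<index, mask_bit> writes lane `index` of the result from b when the
// mask bit is set, and from a otherwise. blend() below instantiates it for all
// eight lanes (two float32x4 halves) at compile time.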
template <> class Vec256<float> {
private:
float32x4x2_t values;
public:
using value_type = float;
static constexpr int size() {
return 8;
}
Vec256() {}
Vec256(float32x4x2_t v) : values(v) {}
Vec256(float val) : values{vdupq_n_f32(val), vdupq_n_f32(val) } {}
Vec256(float val0, float val1, float val2, float val3,
float val4, float val5, float val6, float val7) :
values{val0, val1, val2, val3, val4, val5, val6, val7} {}
Vec256(float32x4_t val0, float32x4_t val1) : values{val0, val1} {}
operator float32x4x2_t() const {
return values;
}
template <int64_t mask>
static Vec256<float> blend(const Vec256<float>& a, const Vec256<float>& b) {
Vec256<float> vec;
// Low half: lanes 0-3.
vec.values.val[0] =
BlendRegs<0, (mask & 0x01)!=0>::impl(
a.values.val[0], b.values.val[0], vec.values.val[0]);
vec.values.val[0] =
BlendRegs<1, (mask & 0x02)!=0>::impl(
a.values.val[0], b.values.val[0], vec.values.val[0]);
vec.values.val[0] =
BlendRegs<2, (mask & 0x04)!=0>::impl(
a.values.val[0], b.values.val[0], vec.values.val[0]);
vec.values.val[0] =
BlendRegs<3, (mask & 0x08)!=0>::impl(
a.values.val[0], b.values.val[0], vec.values.val[0]);
// High half: lanes 4-7.
vec.values.val[1] =
BlendRegs<0, (mask & 0x10)!=0>::impl(
a.values.val[1], b.values.val[1], vec.values.val[1]);
vec.values.val[1] =
BlendRegs<1, (mask & 0x20)!=0>::impl(
a.values.val[1], b.values.val[1], vec.values.val[1]);
vec.values.val[1] =
BlendRegs<2, (mask & 0x40)!=0>::impl(
a.values.val[1], b.values.val[1], vec.values.val[1]);
vec.values.val[1] =
BlendRegs<3, (mask & 0x80)!=0>::impl(
a.values.val[1], b.values.val[1], vec.values.val[1]);
return vec;
}
static Vec256<float> blendv(const Vec256<float>& a, const Vec256<float>& b,
const Vec256<float>& mask) {
// NB: This requires that each 32-bit lane of the mask be either
// all zeros or all ones.
// TODO: We could assert this, but that would affect performance.
Vec256<float> vec(mask.values);
vec.values.val[0] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[0]),
b.values.val[0],
a.values.val[0]);
vec.values.val[1] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[1]),
b.values.val[1],
a.values.val[1]);
return vec;
}
template<typename step_t>
static Vec256<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
const Vec256<float> base_vec(base);
const Vec256<float> step_vec(step);
const Vec256<float> step_sizes(0, 1, 2, 3, 4, 5, 6, 7);
return fmadd(step_sizes, step_vec, base_vec);
}
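// set(): the first `count` lanes of the result come from b, the remaining
// lanes come from a.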
static Vec256<float> set(const Vec256<float>& a, const Vec256<float>& b,
int64_t count = size()) {
switch (count) {
case 0:
return a;
case 1:
{
Vec256<float> vec;
static uint32x4_t mask_low = {0xFFFFFFFF, 0x0, 0x0, 0x0};
vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
vec.values.val[1] = a.values.val[1];
vec.values.val[0] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[0]),
b.values.val[0],
a.values.val[0]);
return vec;
}
case 2:
{
Vec256<float> vec;
static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
vec.values.val[1] = a.values.val[1];
vec.values.val[0] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[0]),
b.values.val[0],
a.values.val[0]);
return vec;
}
case 3:
{
Vec256<float> vec;
static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
vec.values.val[1] = a.values.val[1];
vec.values.val[0] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[0]),
b.values.val[0],
a.values.val[0]);
return vec;
}
case 4:
return Vec256<float>(b.values.val[0], a.values.val[1]);
case 5:
{
Vec256<float> vec;
static uint32x4_t mask_high = {0xFFFFFFFF, 0x0, 0x0, 0x0};
vec.values.val[0] = b.values.val[0];
vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
vec.values.val[1] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[1]),
b.values.val[1],
a.values.val[1]);
return vec;
}
case 6:
{
Vec256<float> vec;
static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
vec.values.val[0] = b.values.val[0];
vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
vec.values.val[1] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[1]),
b.values.val[1],
a.values.val[1]);
return vec;
}
case 7:
{
Vec256<float> vec;
static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
vec.values.val[0] = b.values.val[0];
vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
vec.values.val[1] = vbslq_f32(
vreinterpretq_u32_f32(vec.values.val[1]),
b.values.val[1],
a.values.val[1]);
return vec;
}
}
return b;
}
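// loadu(): unaligned load; when count < size() the remaining lanes are
// zero-filled.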
static Vec256<float> loadu(const void* ptr, int64_t count = size()) {
if (count == size()) {
return vld1q_f32_x2(reinterpret_cast<const float*>(ptr));
}
else if (count == (size() >> 1)) {
Vec256<float> res;
res.values.val[0] = vld1q_f32(reinterpret_cast<const float*>(ptr));
res.values.val[1] = vdupq_n_f32(0.f);
return res;
}
else {
__at_align32__ float tmp_values[size()];
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0.0;
}
std::memcpy(
tmp_values,
reinterpret_cast<const float*>(ptr),
count * sizeof(float));
return vld1q_f32_x2(reinterpret_cast<const float*>(tmp_values));
}
}
void store(void* ptr, int64_t count = size()) const {
if (count == size()) {
vst1q_f32_x2(reinterpret_cast<float*>(ptr), values);
}
else if (count == (size() >> 1)) {
vst1q_f32(reinterpret_cast<float*>(ptr), values.val[0]);
}
else {
float tmp_values[size()];
vst1q_f32_x2(reinterpret_cast<float*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(float));
}
}
inline const float32x4_t& get_low() const {
return values.val[0];
}
inline float32x4_t& get_low() {
return values.val[0];
}
inline const float32x4_t& get_high() const {
return values.val[1];
}
inline float32x4_t& get_high() {
return values.val[1];
}
// Very slow implementation of indexing.
// Only required because vec256_qint refers to this.
// Once we specialize that implementation for ARM
// this should be removed. TODO (kimishpatel)
const float operator[](int idx) const {
__at_align32__ float tmp[size()];
store(tmp);
return tmp[idx];
};
const float operator[](int idx) {
__at_align32__ float tmp[size()];
store(tmp);
return tmp[idx];
}
// For boolean uses, where we only want to know if any lane is 1 / all lanes
// are zero, etc., this can be done faster in a different way.
int zero_mask() const {
__at_align32__ float tmp[size()];
store(tmp);
int mask = 0;
for (int i = 0; i < size(); ++ i) {
if (tmp[i] == 0.f) {
mask |= (1 << i);
}
}
return mask;
}
Vec256<float> map(float (*f)(float)) const {
__at_align32__ float tmp[size()];
store(tmp);
for (int64_t i = 0; i < size(); i++) {
tmp[i] = f(tmp[i]);
}
return loadu(tmp);
}
Vec256<float> abs() const {
return Vec256<float>(vabsq_f32(values.val[0]), vabsq_f32(values.val[1]));
}
Vec256<float> angle() const {
return Vec256<float>(0.f);
}
Vec256<float> real() const {
return *this;
}
Vec256<float> imag() const {
return Vec256<float>(0.f);
}
Vec256<float> conj() const {
return *this;
}
Vec256<float> acos() const {
return map(std::acos);
}
Vec256<float> asin() const {
return map(std::asin);
}
Vec256<float> atan() const {
return map(std::atan);
}
Vec256<float> atan2(const Vec256<float> &exp) const {
__at_align32__ float tmp[size()];
__at_align32__ float tmp_exp[size()];
store(tmp);
exp.store(tmp_exp);
for (int64_t i = 0; i < size(); i++) {
tmp[i] = std::atan2(tmp[i], tmp_exp[i]);
}
return loadu(tmp);
}
Vec256<float> erf() const {
return map(std::erf);
}
Vec256<float> erfc() const {
return map(std::erfc);
}
Vec256<float> erfinv() const {
return map(calc_erfinv);
}
Vec256<float> exp() const {
return map(std::exp);
}
Vec256<float> expm1() const {
return map(std::expm1);
}
Vec256<float> fmod(const Vec256<float>& q) const {
__at_align32__ float tmp[size()];
__at_align32__ float tmp_q[size()];
store(tmp);
q.store(tmp_q);
for (int64_t i = 0; i < size(); i++) {
tmp[i] = std::fmod(tmp[i], tmp_q[i]);
}
return loadu(tmp);
}
Vec256<float> log() const {
return map(std::log);
}
Vec256<float> log10() const {
return map(std::log10);
}
Vec256<float> log1p() const {
return map(std::log1p);
}
Vec256<float> log2() const {
return map(std::log2);
}
Vec256<float> frac() const;
Vec256<float> sin() const {
return map(std::sin);
}
Vec256<float> sinh() const {
return map(std::sinh);
}
Vec256<float> cos() const {
return map(std::cos);
}
Vec256<float> cosh() const {
return map(std::cosh);
}
Vec256<float> ceil() const {
return map(at::native::ceil_impl);
}
Vec256<float> floor() const {
return map(at::native::floor_impl);
}
Vec256<float> neg() const {
return Vec256<float>(
vnegq_f32(values.val[0]),
vnegq_f32(values.val[1]));
}
Vec256<float> round() const {
// We do not use std::round because we would like to round midway numbers to the nearest even integer.
return map(at::native::round_impl);
}
Vec256<float> tan() const {
return map(std::tan);
}
Vec256<float> tanh() const {
return map(std::tanh);
}
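// Truncates toward zero via a float -> int32 -> float round trip
// (assumes the values fit in the int32 range).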
Vec256<float> trunc() const {
float32x4_t r0 = vcvtq_f32_s32(vcvtq_s32_f32(values.val[0]));
float32x4_t r1 = vcvtq_f32_s32(vcvtq_s32_f32(values.val[1]));
return Vec256<float>(r0, r1);
}
Vec256<float> lgamma() const {
return map(std::lgamma);
}
Vec256<float> sqrt() const {
return Vec256<float>(
vsqrtq_f32(values.val[0]),
vsqrtq_f32(values.val[1]));
}
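// NB: vrecpeq_f32 and vrsqrteq_f32 below are low-precision estimates (no
// Newton-Raphson refinement), hence the looser tolerances in vec256_test.cpp.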
Vec256<float> reciprocal() const {
return Vec256<float>(
vrecpeq_f32(values.val[0]),
vrecpeq_f32(values.val[1]));
}
Vec256<float> rsqrt() const {
return Vec256<float>(
vrsqrteq_f32(values.val[0]),
vrsqrteq_f32(values.val[1]));
}
Vec256<float> pow(const Vec256<float> &exp) const {
__at_align32__ float tmp[size()];
__at_align32__ float tmp_exp[size()];
store(tmp);
exp.store(tmp_exp);
for (int64_t i = 0; i < size(); i++) {
tmp[i] = std::pow(tmp[i], tmp_exp[i]);
}
return loadu(tmp);
}
Vec256<float> operator==(const Vec256<float>& other) const {
float32x4_t r0 =
vreinterpretq_f32_u32(vceqq_f32(values.val[0], other.values.val[0]));
float32x4_t r1 =
vreinterpretq_f32_u32(vceqq_f32(values.val[1], other.values.val[1]));
return Vec256<float>(r0, r1);
}
Vec256<float> operator!=(const Vec256<float>& other) const {
float32x4_t r0 = vreinterpretq_f32_u32(
vmvnq_u32(vceqq_f32(values.val[0], other.values.val[0])));
float32x4_t r1 = vreinterpretq_f32_u32(
vmvnq_u32(vceqq_f32(values.val[1], other.values.val[1])));
return Vec256<float>(r0, r1);
}
Vec256<float> operator<(const Vec256<float>& other) const {
float32x4_t r0 =
vreinterpretq_f32_u32(vcltq_f32(values.val[0], other.values.val[0]));
float32x4_t r1 =
vreinterpretq_f32_u32(vcltq_f32(values.val[1], other.values.val[1]));
return Vec256<float>(r0, r1);
}
Vec256<float> operator<=(const Vec256<float>& other) const {
float32x4_t r0 =
vreinterpretq_f32_u32(vcleq_f32(values.val[0], other.values.val[0]));
float32x4_t r1 =
vreinterpretq_f32_u32(vcleq_f32(values.val[1], other.values.val[1]));
return Vec256<float>(r0, r1);
}
Vec256<float> operator>(const Vec256<float>& other) const {
float32x4_t r0 =
vreinterpretq_f32_u32(vcgtq_f32(values.val[0], other.values.val[0]));
float32x4_t r1 =
vreinterpretq_f32_u32(vcgtq_f32(values.val[1], other.values.val[1]));
return Vec256<float>(r0, r1);
}
Vec256<float> operator>=(const Vec256<float>& other) const {
float32x4_t r0 =
vreinterpretq_f32_u32(vcgeq_f32(values.val[0], other.values.val[0]));
float32x4_t r1 =
vreinterpretq_f32_u32(vcgeq_f32(values.val[1], other.values.val[1]));
return Vec256<float>(r0, r1);
}
Vec256<float> eq(const Vec256<float>& other) const;
Vec256<float> ne(const Vec256<float>& other) const;
Vec256<float> gt(const Vec256<float>& other) const;
Vec256<float> ge(const Vec256<float>& other) const;
Vec256<float> lt(const Vec256<float>& other) const;
Vec256<float> le(const Vec256<float>& other) const;
};
template <>
Vec256<float> inline operator+(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vaddq_f32(a.get_low(), b.get_low());
float32x4_t r1 = vaddq_f32(a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
template <>
Vec256<float> inline operator-(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vsubq_f32(a.get_low(), b.get_low());
float32x4_t r1 = vsubq_f32(a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
template <>
Vec256<float> inline operator*(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vmulq_f32(a.get_low(), b.get_low());
float32x4_t r1 = vmulq_f32(a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
template <>
Vec256<float> inline operator/(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vdivq_f32(a.get_low(), b.get_low());
float32x4_t r1 = vdivq_f32(a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
// frac. Implemented out-of-line so we can use the subtraction operator defined above.
Vec256<float> Vec256<float>::frac() const {
return *this - this->trunc();
}
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vec256<float> inline maximum(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vmaxq_f32(a.get_low(), b.get_low());
float32x4_t r1 = vmaxq_f32(a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vec256<float> inline minimum(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vminq_f32(a.get_low(), b.get_low());
float32x4_t r1 = vminq_f32(a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
template <>
Vec256<float> inline clamp(const Vec256<float>& a, const Vec256<float>& min, const Vec256<float>& max) {
return minimum(max, maximum(min, a));
}
template <>
Vec256<float> inline clamp_max(const Vec256<float>& a, const Vec256<float>& max) {
return minimum(max, a);
}
template <>
Vec256<float> inline clamp_min(const Vec256<float>& a, const Vec256<float>& min) {
return maximum(min, a);
}
template <>
Vec256<float> inline operator&(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vreinterpretq_u32_f32(vandq_u32(
vreinterpretq_u32_f32(a.get_low()),
vreinterpretq_u32_f32(b.get_low())));
float32x4_t r1 = vreinterpretq_u32_f32(vandq_u32(
vreinterpretq_u32_f32(a.get_high()),
vreinterpretq_u32_f32(b.get_high())));
return Vec256<float>(r0, r1);
}
template <>
Vec256<float> inline operator|(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vreinterpretq_u32_f32(vorrq_u32(
vreinterpretq_u32_f32(a.get_low()),
vreinterpretq_u32_f32(b.get_low())));
float32x4_t r1 = vreinterpretq_u32_f32(vorrq_u32(
vreinterpretq_u32_f32(a.get_high()),
vreinterpretq_u32_f32(b.get_high())));
return Vec256<float>(r0, r1);
}
template <>
Vec256<float> inline operator^(const Vec256<float>& a, const Vec256<float>& b) {
float32x4_t r0 = vreinterpretq_u32_f32(veorq_u32(
vreinterpretq_u32_f32(a.get_low()),
vreinterpretq_u32_f32(b.get_low())));
float32x4_t r1 = vreinterpretq_u32_f32(veorq_u32(
vreinterpretq_u32_f32(a.get_high()),
vreinterpretq_u32_f32(b.get_high())));
return Vec256<float>(r0, r1);
}
Vec256<float> Vec256<float>::eq(const Vec256<float>& other) const {
return (*this == other) & Vec256<float>(1.0f);
}
Vec256<float> Vec256<float>::ne(const Vec256<float>& other) const {
return (*this != other) & Vec256<float>(1.0f);
}
Vec256<float> Vec256<float>::gt(const Vec256<float>& other) const {
return (*this > other) & Vec256<float>(1.0f);
}
Vec256<float> Vec256<float>::ge(const Vec256<float>& other) const {
return (*this >= other) & Vec256<float>(1.0f);
}
Vec256<float> Vec256<float>::lt(const Vec256<float>& other) const {
return (*this < other) & Vec256<float>(1.0f);
}
Vec256<float> Vec256<float>::le(const Vec256<float>& other) const {
return (*this <= other) & Vec256<float>(1.0f);
}
template <>
inline void convert(const float* src, int32_t* dst, int64_t n) {
int64_t i;
#pragma unroll
for (i = 0; i <= (n - Vec256<float>::size()); i += Vec256<float>::size()) {
vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i)));
vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4)));
}
#pragma unroll
for (; i < n; i++) {
dst[i] = static_cast<int32_t>(src[i]);
}
}
template <>
inline void convert(const int32_t* src, float* dst, int64_t n) {
int64_t i;
#pragma unroll
for (i = 0; i <= (n - Vec256<float>::size()); i += Vec256<float>::size()) {
vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i)));
vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4)));
}
#pragma unroll
for (; i < n; i++) {
dst[i] = static_cast<float>(src[i]);
}
}
template <>
Vec256<float> inline fmadd(const Vec256<float>& a, const Vec256<float>& b, const Vec256<float>& c) {
float32x4_t r0 = vfmaq_f32(c.get_low(), a.get_low(), b.get_low());
float32x4_t r1 = vfmaq_f32(c.get_high(), a.get_high(), b.get_high());
return Vec256<float>(r0, r1);
}
#endif
}}}


@ -1093,10 +1093,19 @@ struct Vec256QuantizedConverter {
Vec256<float> scale_zp_premul) const {
float_vec_return_type rv;
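// Dequantize into a temporary array and construct the Vec256<float> in one
// shot; the NEON specialization of Vec256<float> only provides read-only
// per-element access.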
for (int i = 0; i < float_num_vecs(); ++i) {
float tmp_vals[8];
for (int j = 0; j < 8; ++j) {
rv[i][j] = at::native::dequantize_val<T>(
tmp_vals[j] = at::native::dequantize_val<T>(
scale[j], zero_point[j], T(vals[8 * i + j]));
}
rv[i] = Vec256<float>(tmp_vals[0],
tmp_vals[1],
tmp_vals[2],
tmp_vals[3],
tmp_vals[4],
tmp_vals[5],
tmp_vals[6],
tmp_vals[7]);
}
return rv;
}


@ -77,8 +77,12 @@ list(APPEND ATen_HIP_TEST_SRCS
list(APPEND ATen_VULKAN_TEST_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/vulkan_test.cpp)
list(APPEND ATen_VEC256_TEST_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/vec256_test.cpp)
# ---[ Send the lists to the parent scope.
set(ATen_CPU_TEST_SRCS ${ATen_CPU_TEST_SRCS} PARENT_SCOPE)
set(ATen_CUDA_TEST_SRCS ${ATen_CUDA_TEST_SRCS} PARENT_SCOPE)
set(ATen_HIP_TEST_SRCS ${ATen_HIP_TEST_SRCS} PARENT_SCOPE)
set(ATen_VULKAN_TEST_SRCS ${ATen_VULKAN_TEST_SRCS} PARENT_SCOPE)
set(ATen_VEC256_TEST_SRCS ${ATen_VEC256_TEST_SRCS} PARENT_SCOPE)


@ -0,0 +1,671 @@
#include <gtest/gtest.h>
#include <ATen/cpu/vec256/vec256.h>
#include <ATen/ATen.h>
#include <functional>
using namespace at::vec256;
bool check_equal(const at::Tensor& a, const at::Tensor& b) {
return (a.equal(b));
}
bool check_almost_equal(
const at::Tensor& a, const at::Tensor& b, const float tolerance) {
double max_val = a.abs().max().item<float>();
max_val = std::max(max_val, b.abs().max().item<float>());
if ((a - b).abs().max().item<float>() > tolerance * max_val) {
std::cout << "Max difference:"
<< (a - b).abs().max().item<float>() << std::endl;
return false;
}
return true;
}
template<typename T>
void BlendTestHelperScalar(
const T* a_ptr,
const T* b_ptr,
T* res_ptr,
const int64_t num_els,
const int64_t count) {
for(auto i = 0; i < num_els; ++i) {
for (auto j = 0; j < Vec256<float>::size(); ++j) {
auto index = i * Vec256<float>::size() + j;
if (j < count) {
res_ptr[index] = b_ptr[index];
} else {
res_ptr[index] = a_ptr[index];
}
}
}
}
namespace Impl {
float reciprocal(const float a) {
return (1/a);
}
float rsqrt(const float a) {
return (1/std::sqrt(a));
}
float frac(const float a) {
return a - (static_cast<int32_t>(a));
}
}
template<typename T>
void BlendTestHelperVector(
const T* a_ptr,
const T* b_ptr,
T* res_ptr,
const int64_t num_els,
const int64_t count) {
for(auto i = 0; i < num_els; ++i) {
auto a_elements = Vec256<float>::loadu(a_ptr);
auto b_elements = Vec256<float>::loadu(b_ptr);
a_ptr += Vec256<float>::size();
b_ptr += Vec256<float>::size();
auto res_elements = Vec256<float>::set(a_elements, b_elements, count);
res_elements.store(res_ptr);
res_ptr += Vec256<float>::size();
}
}
#define TranscedentalTester(opnamespace, name) \
void TranscedentalHelper_##name(const float tolerance = 1e-6) { \
at::Tensor a = at::rand({23, 23}); \
a = a * -10; \
a = a + 10; \
at::Tensor ref_res = at::zeros({23, 23}); \
at::Tensor vec_res = at::zeros({23, 23}); \
float* a_ptr = a.data_ptr<float>(); \
float* ref_res_ptr = ref_res.data_ptr<float>(); \
float* vec_res_ptr = vec_res.data_ptr<float>(); \
size_t num_els = \
(a.numel() / Vec256<float>::size()) * Vec256<float>::size(); \
for(auto i = 0; i < num_els; ++i) { \
ref_res_ptr[i] = opnamespace::name(a_ptr[i]); \
} \
for (size_t i = 0; i < num_els; i += Vec256<float>::size()) { \
auto a_elements = Vec256<float>::loadu(a_ptr); \
a_ptr += Vec256<float>::size(); \
auto res = a_elements.name(); \
res.store(vec_res_ptr); \
vec_res_ptr += Vec256<float>::size(); \
} \
ASSERT_TRUE(check_almost_equal(ref_res, vec_res, tolerance)); \
}
#define TranscedentalTester2(name) \
void TranscedentalHelper_##name(const float tolerance = 1e-6) { \
at::Tensor a = at::rand({23, 23}); \
at::Tensor b = at::rand({23, 23}); \
a = a * -10; \
a = a + 10; \
at::Tensor ref_res = at::zeros({23, 23}); \
at::Tensor vec_res = at::zeros({23, 23}); \
float* a_ptr = a.data_ptr<float>(); \
float* b_ptr = b.data_ptr<float>(); \
float* ref_res_ptr = ref_res.data_ptr<float>(); \
float* vec_res_ptr = vec_res.data_ptr<float>(); \
size_t num_els = \
(a.numel() / Vec256<float>::size()) * Vec256<float>::size(); \
for(auto i = 0; i < num_els; ++i) { \
ref_res_ptr[i] = std::name(a_ptr[i], b_ptr[i]); \
} \
for (size_t i = 0; i < num_els; i += Vec256<float>::size()) { \
auto a_elements = Vec256<float>::loadu(a_ptr); \
auto b_elements = Vec256<float>::loadu(b_ptr); \
a_ptr += Vec256<float>::size(); \
b_ptr += Vec256<float>::size(); \
auto res = a_elements.name(b_elements); \
res.store(vec_res_ptr); \
vec_res_ptr += Vec256<float>::size(); \
} \
ASSERT_TRUE(check_almost_equal(ref_res, vec_res, tolerance)); \
}
// Not testing all the transcendentals.
// In fact fewer than these might suffice, since the current implementation
// just calls the STL version of each of them.
// So what is really being checked is the logic that maps a function over the vector.
TranscedentalTester(std, abs)
TranscedentalTester(std, acos)
TranscedentalTester(std, asin)
TranscedentalTester(std, atan)
TranscedentalTester(std, erf)
TranscedentalTester(std, exp)
TranscedentalTester(std, log)
TranscedentalTester(std, tan)
TranscedentalTester(std, trunc)
TranscedentalTester(std, sqrt)
TranscedentalTester2(atan2)
TranscedentalTester2(fmod)
TranscedentalTester2(pow)
TranscedentalTester(Impl, reciprocal)
TranscedentalTester(Impl, rsqrt)
TranscedentalTester(Impl, frac)
enum class OP_TYPE {
EQ = 0,
NE,
GT,
GE,
LT,
LE,
MIN,
MAX,
ADD,
SUB,
MUL,
DIV,
AND,
OR,
EXOR
};
void BasicOpTestHelper(const OP_TYPE& compare_type) {
at::Tensor a = at::rand({23, 23});
at::Tensor b = at::rand({23, 23});
at::Tensor ref_res = at::zeros({23, 23});
at::Tensor vec_res = at::zeros({23, 23});
size_t num_els =
(a.numel() / Vec256<float>::size()) * Vec256<float>::size();
// Vector components
float* a_ptr = a.data_ptr<float>();
float* b_ptr = b.data_ptr<float>();
float* ref_res_ptr = ref_res.data_ptr<float>();
for (size_t i = 0; i < num_els; ++i) {
switch (compare_type) {
case OP_TYPE::EQ:
if (a_ptr[i] == b_ptr[i]) {
ref_res_ptr[i] = 1.0f;
} else {
ref_res_ptr[i] = 0;
}
break;
case OP_TYPE::NE:
if (a_ptr[i] != b_ptr[i]) {
ref_res_ptr[i] = 1.0f;
} else {
ref_res_ptr[i] = 0;
}
break;
case OP_TYPE::GT:
if (a_ptr[i] > b_ptr[i]) {
ref_res_ptr[i] = 1.0f;
} else {
ref_res_ptr[i] = 0;
}
break;
case OP_TYPE::GE:
if (a_ptr[i] >= b_ptr[i]) {
ref_res_ptr[i] = 1.0f;
} else {
ref_res_ptr[i] = 0;
}
break;
case OP_TYPE::LT:
if (a_ptr[i] < b_ptr[i]) {
ref_res_ptr[i] = 1.0f;
} else {
ref_res_ptr[i] = 0;
}
break;
case OP_TYPE::LE:
if (a_ptr[i] <= b_ptr[i]) {
ref_res_ptr[i] = 1.0f;
} else {
ref_res_ptr[i] = 0;
}
break;
case OP_TYPE::MIN:
ref_res_ptr[i] = std::min(a_ptr[i], b_ptr[i]);
break;
case OP_TYPE::MAX:
ref_res_ptr[i] = std::max(a_ptr[i], b_ptr[i]);
break;
case OP_TYPE::ADD:
ref_res_ptr[i] = a_ptr[i] + b_ptr[i];
break;
case OP_TYPE::SUB:
ref_res_ptr[i] = a_ptr[i] - b_ptr[i];
break;
case OP_TYPE::MUL:
ref_res_ptr[i] = a_ptr[i] * b_ptr[i];
break;
case OP_TYPE::DIV:
ref_res_ptr[i] = a_ptr[i] / b_ptr[i];
break;
case OP_TYPE::OR:
{
uint32_t *a_val, *b_val;
a_val = reinterpret_cast<uint32_t*>(&a_ptr[i]);
b_val = reinterpret_cast<uint32_t*>(&b_ptr[i]);
uint32_t c_val = (*a_val) | (*b_val);
float* c_val_float;
c_val_float = reinterpret_cast<float*>(&c_val);
ref_res_ptr[i] = *c_val_float;
}
break;
case OP_TYPE::AND:
{
uint32_t *a_val, *b_val;
a_val = reinterpret_cast<uint32_t*>(&a_ptr[i]);
b_val = reinterpret_cast<uint32_t*>(&b_ptr[i]);
uint32_t c_val = (*a_val) & (*b_val);
float* c_val_float;
c_val_float = reinterpret_cast<float*>(&c_val);
ref_res_ptr[i] = *c_val_float;
}
break;
case OP_TYPE::EXOR:
{
uint32_t *a_val, *b_val;
a_val = reinterpret_cast<uint32_t*>(&a_ptr[i]);
b_val = reinterpret_cast<uint32_t*>(&b_ptr[i]);
uint32_t c_val = (*a_val) ^ (*b_val);
float* c_val_float;
c_val_float = reinterpret_cast<float*>(&c_val);
ref_res_ptr[i] = *c_val_float;
}
break;
}
}
// Vectorized impl
float* vec_res_ptr = vec_res.data_ptr<float>();
for (size_t i = 0; i < num_els; i += Vec256<float>::size()) {
auto a_elements = Vec256<float>::loadu(a_ptr);
auto b_elements = Vec256<float>::loadu(b_ptr);
a_ptr += Vec256<float>::size();
b_ptr += Vec256<float>::size();
Vec256<float> res_elements;
switch (compare_type) {
case OP_TYPE::EQ:
res_elements = a_elements.eq(b_elements);
break;
case OP_TYPE::NE:
res_elements = a_elements.ne(b_elements);
break;
case OP_TYPE::GT:
res_elements = a_elements.gt(b_elements);
break;
case OP_TYPE::GE:
res_elements = a_elements.ge(b_elements);
break;
case OP_TYPE::LT:
res_elements = a_elements.lt(b_elements);
break;
case OP_TYPE::LE:
res_elements = a_elements.le(b_elements);
break;
case OP_TYPE::MIN:
res_elements = at::vec256::minimum(a_elements, b_elements);
break;
case OP_TYPE::MAX:
res_elements = at::vec256::maximum(a_elements, b_elements);
break;
case OP_TYPE::ADD:
res_elements = a_elements + b_elements;
break;
case OP_TYPE::SUB:
res_elements = a_elements - b_elements;
break;
case OP_TYPE::MUL:
res_elements = a_elements * b_elements;
break;
case OP_TYPE::DIV:
res_elements = a_elements / b_elements;
break;
case OP_TYPE::OR:
res_elements = a_elements | b_elements;
break;
case OP_TYPE::AND:
res_elements = a_elements & b_elements;
break;
case OP_TYPE::EXOR:
res_elements = a_elements ^ b_elements;
break;
}
res_elements.store(vec_res_ptr);
vec_res_ptr += Vec256<float>::size();
}
ASSERT_TRUE(check_equal(ref_res, vec_res));
}
// Checks both loads and stores.
TEST(Vec256TestFloat, CopyTest) {
at::Tensor a = at::rand({23, 23});
at::Tensor b = at::zeros({23, 23});
// Copy goes through vec256 via tensoriterator
b.copy_(a);
ASSERT_TRUE(check_equal(a, b));
}
TEST(Vec256TestFloat, arangeTest) {
at::Tensor arange_output_ref = at::zeros({8});
at::Tensor arange_output_vectorized = at::zeros({8});
float base = 7.f;
float step = 5.f;
float* ref_output_ptr = arange_output_ref.data_ptr<float>();
for (int64_t i = 0; i < 8; ++i) {
ref_output_ptr[i] = base + i * step;
}
float* vec_output_ptr = arange_output_vectorized.data_ptr<float>();
auto arange_output = Vec256<float>::arange(base, step);
arange_output.store(vec_output_ptr);
ASSERT_TRUE(check_equal(arange_output_ref, arange_output_vectorized));
}
// Checks blend and blendv.
TEST(Vec256TestFloat, Blend) {
at::Tensor a = at::rand({23, 23});
at::Tensor b = at::rand({23, 23});
at::Tensor ref_res = at::zeros({23, 23});
at::Tensor vec_res = at::zeros({23, 23});
// Check templatized blend.
// Reference result:
const int64_t mask = 0xC5;
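// 0xC5 == 0b11000101: lanes 0, 2, 6 and 7 are taken from b, the rest from a.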
// Only check over multiple of Vec::size elements
size_t num_els =
(a.numel() / Vec256<float>::size()) * Vec256<float>::size();
// Vector components
float* a_ptr = a.data_ptr<float>();
float* b_ptr = b.data_ptr<float>();
float* ref_res_ptr = ref_res.data_ptr<float>();
int64_t tmp_mask = mask;
for (size_t i = 0; i < num_els; ++i) {
if (i % Vec256<float>::size() == 0) {
tmp_mask = mask;
}
if (tmp_mask & 0x1) {
ref_res_ptr[i] = b_ptr[i];
} else {
ref_res_ptr[i] = a_ptr[i];
}
tmp_mask = tmp_mask >> 1;
}
// Vectorized impl
float* vec_res_ptr = vec_res.data_ptr<float>();
for (size_t i = 0; i < num_els; i += Vec256<float>::size()) {
auto a_elements = Vec256<float>::loadu(a_ptr);
auto b_elements = Vec256<float>::loadu(b_ptr);
a_ptr += Vec256<float>::size();
b_ptr += Vec256<float>::size();
auto res_elements = Vec256<float>::blend<mask>(a_elements, b_elements);
res_elements.store(vec_res_ptr);
vec_res_ptr += Vec256<float>::size();
}
ASSERT_TRUE(check_equal(ref_res, vec_res));
// Vector components
a_ptr = a.data_ptr<float>();
b_ptr = b.data_ptr<float>();
int32_t full_int_mask = 0xFFFFFFFF;
float* full_ptr = reinterpret_cast<float*>(&full_int_mask);
float full_float_mask = *full_ptr;
Vec256<float> float_mask(full_float_mask, 0, full_float_mask, 0,
0, full_float_mask, 0, 0);
float float_mask_array[Vec256<float>::size()];
float_mask.store(float_mask_array);
ref_res_ptr = ref_res.data_ptr<float>();
for (size_t i = 0; i < num_els; ++i) {
if (float_mask_array[i % Vec256<float>::size()] != 0) {
ref_res_ptr[i] = b_ptr[i];
} else {
ref_res_ptr[i] = a_ptr[i];
}
tmp_mask = tmp_mask >> 1;
}
// Vectorized impl
vec_res_ptr = vec_res.data_ptr<float>();
for (size_t i = 0; i < num_els; i += Vec256<float>::size()) {
auto a_elements = Vec256<float>::loadu(a_ptr);
auto b_elements = Vec256<float>::loadu(b_ptr);
a_ptr += Vec256<float>::size();
b_ptr += Vec256<float>::size();
auto res_elements = Vec256<float>::blendv(a_elements, b_elements, float_mask);
res_elements.store(vec_res_ptr);
vec_res_ptr += Vec256<float>::size();
}
ASSERT_TRUE(check_equal(ref_res, vec_res));
}
// Checks Set
TEST(Vec256TestFloat, Set) {
at::Tensor a = at::rand({23, 23});
at::Tensor b = at::rand({23, 23});
at::Tensor ref_res = at::zeros({23, 23});
at::Tensor vec_res = at::zeros({23, 23});
const float* a_ptr = a.data_ptr<float>();
const float* b_ptr = b.data_ptr<float>();
float* ref_res_ptr = ref_res.data_ptr<float>();
float* vec_res_ptr = vec_res.data_ptr<float>();
// num_els here counts full Vec256<float> chunks; the helpers iterate chunk by chunk.
const size_t num_els = (a.numel() / Vec256<float>::size());
BlendTestHelperScalar(a_ptr, b_ptr, ref_res_ptr, num_els, 0);
BlendTestHelperVector(a_ptr, b_ptr, vec_res_ptr, num_els, 0);
ASSERT_TRUE(check_equal(ref_res, vec_res));
BlendTestHelperScalar(a_ptr, b_ptr, ref_res_ptr, num_els, 1);
BlendTestHelperVector(a_ptr, b_ptr, vec_res_ptr, num_els, 1);
ASSERT_TRUE(check_equal(ref_res, vec_res));
BlendTestHelperScalar(a_ptr, b_ptr, ref_res_ptr, num_els, 4);
BlendTestHelperVector(a_ptr, b_ptr, vec_res_ptr, num_els, 4);
ASSERT_TRUE(check_equal(ref_res, vec_res));
BlendTestHelperScalar(a_ptr, b_ptr, ref_res_ptr, num_els, 6);
BlendTestHelperVector(a_ptr, b_ptr, vec_res_ptr, num_els, 6);
ASSERT_TRUE(check_equal(ref_res, vec_res));
BlendTestHelperScalar(a_ptr, b_ptr, ref_res_ptr, num_els, 8);
BlendTestHelperVector(a_ptr, b_ptr, vec_res_ptr, num_els, 8);
ASSERT_TRUE(check_equal(ref_res, vec_res));
}
TEST(Vec256TestFloat, Abs) {
TranscedentalHelper_abs();
}
TEST(Vec256TestFloat, acos) {
TranscedentalHelper_acos();
}
TEST(Vec256TestFloat, asin) {
TranscedentalHelper_asin();
}
TEST(Vec256TestFloat, atan) {
TranscedentalHelper_atan();
}
TEST(Vec256TestFloat, erf) {
TranscedentalHelper_erf();
}
TEST(Vec256TestFloat, exp) {
TranscedentalHelper_exp();
}
TEST(Vec256TestFloat, tan) {
TranscedentalHelper_tan();
}
TEST(Vec256TestFloat, log) {
TranscedentalHelper_log();
}
TEST(Vec256TestFloat, trunc) {
TranscedentalHelper_trunc();
}
TEST(Vec256TestFloat, sqrt) {
TranscedentalHelper_sqrt();
}
TEST(Vec256TestFloat, atan2) {
TranscedentalHelper_atan2();
}
TEST(Vec256TestFloat, fmod) {
TranscedentalHelper_fmod();
}
TEST(Vec256TestFloat, pow) {
TranscedentalHelper_pow();
}
TEST(Vec256TestFloat, reciprocal) {
TranscedentalHelper_reciprocal(1e-3);
}
TEST(Vec256TestFloat, rsqrt) {
// rsqrt tolerance is much worse.
// If we did not set the seed, even this tolerance would be violated sometimes.
TranscedentalHelper_rsqrt(5e-3);
}
TEST(Vec256TestFloat, frac) {
TranscedentalHelper_frac();
}
TEST(Vec256TestFloat, compare_eq) {
BasicOpTestHelper(OP_TYPE::EQ);
}
TEST(Vec256TestFloat, compare_ne) {
BasicOpTestHelper(OP_TYPE::NE);
}
TEST(Vec256TestFloat, compare_gt) {
BasicOpTestHelper(OP_TYPE::GT);
}
TEST(Vec256TestFloat, compare_ge) {
BasicOpTestHelper(OP_TYPE::GE);
}
TEST(Vec256TestFloat, compare_lt) {
BasicOpTestHelper(OP_TYPE::LT);
}
TEST(Vec256TestFloat, compare_le) {
BasicOpTestHelper(OP_TYPE::LE);
}
TEST(Vec256TestFloat, check_min) {
BasicOpTestHelper(OP_TYPE::MIN);
}
TEST(Vec256TestFloat, check_max) {
BasicOpTestHelper(OP_TYPE::MAX);
}
TEST(Vec256TestFloat, compare_add) {
BasicOpTestHelper(OP_TYPE::ADD);
}
TEST(Vec256TestFloat, compare_sub) {
BasicOpTestHelper(OP_TYPE::SUB);
}
TEST(Vec256TestFloat, check_mul) {
BasicOpTestHelper(OP_TYPE::MUL);
}
TEST(Vec256TestFloat, check_div) {
BasicOpTestHelper(OP_TYPE::DIV);
}
TEST(Vec256TestFloat, compare_or) {
BasicOpTestHelper(OP_TYPE::OR);
}
TEST(Vec256TestFloat, check_and) {
BasicOpTestHelper(OP_TYPE::AND);
}
TEST(Vec256TestFloat, check_xor) {
BasicOpTestHelper(OP_TYPE::EXOR);
}
TEST(Vec256TestFloat, check_convert) {
at::Tensor a = at::rand({23, 23});
a = a * -10;
a = a + 10;
at::Tensor ref_res =
at::empty({23, 23}, at::device(at::kCPU).dtype(at::kInt));
at::Tensor vec_res =
at::empty({23, 23}, at::device(at::kCPU).dtype(at::kInt));
float* a_float_ptr = a.data_ptr<float>();
int32_t* ref_res_int_ptr = ref_res.data_ptr<int32_t>();
int32_t* vec_res_int_ptr = vec_res.data_ptr<int32_t>();
for(auto i = 0; i < a.numel(); ++i) {
ref_res_int_ptr[i] = static_cast<int32_t>(a_float_ptr[i]);
}
at::vec256::convert(a_float_ptr, vec_res_int_ptr, a.numel());
ASSERT_TRUE(check_almost_equal(ref_res, vec_res, 1e-6));
a = at::randint(-100, 100, {23, 23});
a = a.to(at::kInt);
ref_res = at::empty({23, 23});
vec_res = at::empty({23, 23});
int32_t* a_int_ptr = a.data_ptr<int32_t>();
float* ref_res_float_ptr = ref_res.data_ptr<float>();
float* vec_res_float_ptr = vec_res.data_ptr<float>();
for(auto i = 0; i < a.numel(); ++i) {
ref_res_float_ptr[i] = static_cast<float>(a_int_ptr[i]);
}
at::vec256::convert(a_int_ptr, vec_res_float_ptr, a.numel());
ASSERT_TRUE(check_almost_equal(ref_res, vec_res, 1e-6));
}
TEST(Vec256TestFloat, check_fmadd) {
at::Tensor a = at::rand({23, 23});
a = a * -10;
a = a + 10;
at::Tensor b = at::rand({23, 23});
b = b * -5;
b = b + 5;
at::Tensor c = at::rand({23, 23});
c = c * 20;
at::Tensor ref_res = at::zeros({23, 23});
at::Tensor vec_res = at::zeros({23, 23});
float* a_ptr = a.data_ptr<float>();
float* b_ptr = b.data_ptr<float>();
float* c_ptr = c.data_ptr<float>();
float* ref_res_ptr = ref_res.data_ptr<float>();
float* vec_res_ptr = vec_res.data_ptr<float>();
size_t num_els =
(a.numel() / Vec256<float>::size()) * Vec256<float>::size();
for(auto i = 0; i < num_els; ++i) {
ref_res_ptr[i] = a_ptr[i] * b_ptr[i] + c_ptr[i];
}
for (size_t i = 0; i < num_els; i += Vec256<float>::size()) {
auto a_elements = Vec256<float>::loadu(a_ptr);
auto b_elements = Vec256<float>::loadu(b_ptr);
auto c_elements = Vec256<float>::loadu(c_ptr);
a_ptr += Vec256<float>::size();
b_ptr += Vec256<float>::size();
c_ptr += Vec256<float>::size();
auto res_elements = at::vec256::fmadd(a_elements, b_elements, c_elements);
res_elements.store(vec_res_ptr);
vec_res_ptr += Vec256<float>::size();
}
ASSERT_TRUE(check_almost_equal(ref_res, vec_res, 1e-6));
}
int main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
at::manual_seed(42);
return RUN_ALL_TESTS();
}


@ -1183,6 +1183,18 @@ if(BUILD_MOBILE_BENCHMARK)
endforeach()
endif()
if(BUILD_MOBILE_TEST)
foreach(test_src ${ATen_MOBILE_TEST_SRCS})
get_filename_component(test_name ${test_src} NAME_WE)
add_executable(${test_name} "${test_src}")
target_link_libraries(${test_name} torch_library gtest_main)
target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
target_include_directories(${test_name} PRIVATE $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>)
target_include_directories(${test_name} PRIVATE ${ATen_CPU_INCLUDE})
add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
endforeach()
endif()
# ---[ Test binaries.
if(BUILD_TEST)
foreach(test_src ${Caffe2_CPU_TEST_SRCS})


@ -557,7 +557,7 @@ endif()
# ---[ Googletest and benchmark
if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK)
if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
# Preserve build options.
set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
@ -638,6 +638,15 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK)
message(WARNING "Reverting changes failed for Google Test. The build may fail.")
endif()
endif()
# Caching variables to enable incremental build.
# Without this, when cross compiling we end up having to blow away the build
# directory and rebuild from scratch.
if(CMAKE_CROSSCOMPILING)
if(COMPILE_HAVE_STD_REGEX)
set(RUN_HAVE_STD_REGEX 0 CACHE INTERNAL "Cache RUN_HAVE_STD_REGEX output for cross-compile.")
endif()
endif()
endif()
# ---[ FBGEMM


@ -88,10 +88,14 @@ if [ -z "$BUILD_MOBILE_BENCHMARK" ]; then
BUILD_MOBILE_BENCHMARK=0
fi
if [ -z "$BUILD_MOBILE_TEST" ]; then
BUILD_MOBILE_TEST=0
fi
# Don't build artifacts we don't need
CMAKE_ARGS+=("-DBUILD_TEST=OFF")
CMAKE_ARGS+=("-DBUILD_BINARY=OFF")
CMAKE_ARGS+=("-DBUILD_MOBILE_BENCHMARK=$BUILD_MOBILE_BENCHMARK")
CMAKE_ARGS+=("-DBUILD_MOBILE_TEST=$BUILD_MOBILE_TEST")
CMAKE_ARGS+=("-DBUILD_PYTHON=OFF")
CMAKE_ARGS+=("-DBUILD_SHARED_LIBS=OFF")
if (( "${ANDROID_NDK_VERSION:-0}" < 18 )); then