mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 00:21:07 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/37358 Test Plan: ```lang=bash buck build mode/opt -c fbcode.cuda_use_clang=true //vision/fair/detectron2/tools:benchmark ``` Reviewed By: ngimel Differential Revision: D21262235 fbshipit-source-id: 00633352d87da0881b2cc90759265fa0d0bd96be
21 lines · 472 B · C++
#pragma once
#include <cstdint>
#include <c10/macros/Macros.h>
namespace c10 {
/**
|
|
* This is the data type for quantized Tensors. Right now we only have
|
|
* qint8 which is for 8 bit Tensors, and qint32 for 32 bit int Tensors,
|
|
* we might have 4 bit, 2 bit or 1 bit data types in the future.
|
|
*/
|
|
struct alignas(1) qint8 {
|
|
using underlying = int8_t;
|
|
int8_t val_;
|
|
qint8() = default;
|
|
C10_HOST_DEVICE explicit qint8(int8_t val) : val_(val) {}
|
|
};
} // namespace c10