pytorch/c10/core/QEngine.h

#pragma once

#include <c10/core/DeviceType.h>
#include <c10/core/DispatchKey.h>
#include <c10/util/Exception.h>

#include <cstdint>
#include <string>
namespace c10 {

/**
 * QEngine is an enum that is used to select the engine to run quantized ops.
 * Keep this enum in sync with get_qengine_id() in
 * torch/backends/quantized/__init__.py
 */
enum class QEngine : uint8_t {
  NoQEngine = 0,
  FBGEMM = 1,
  QNNPACK = 2,
  ONEDNN = 3,
};

constexpr auto kNoQEngine = QEngine::NoQEngine;
constexpr auto kFBGEMM = QEngine::FBGEMM;
constexpr auto kQNNPACK = QEngine::QNNPACK;
constexpr auto kONEDNN = QEngine::ONEDNN;

inline std::string toString(QEngine qengine) {
  switch (qengine) {
    case kNoQEngine:
      return "NoQEngine";
    case kFBGEMM:
      return "FBGEMM";
    case kQNNPACK:
      return "QNNPACK";
    case kONEDNN:
      return "ONEDNN";
    default:
      TORCH_CHECK(
          false, "Unrecognized Quantized Engine: ", static_cast<int>(qengine));
  }
}

} // namespace c10
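
// ---------------------------------------------------------------------------
// Usage sketch (not part of the original header). Everything below other than
// c10::QEngine, the k* aliases, toString(), and TORCH_CHECK is an illustrative
// assumption, not PyTorch API. toString() is typically useful when reporting
// which quantized engine is active, e.g.:
//
//   c10::QEngine engine = c10::kONEDNN;   // engine chosen elsewhere
//   TORCH_CHECK(
//       isEngineSupported(engine),        // hypothetical availability check
//       "Quantized engine ", c10::toString(engine), " is not supported");
//
// On the Python side the engine is selected via torch.backends.quantized.engine
// (e.g. torch.backends.quantized.engine = "fbgemm"), which is why the doc
// comment above asks that this enum stay in sync with get_qengine_id() in
// torch/backends/quantized/__init__.py.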