Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/14163

Some of the names we were using to guard the header files were too short (e.g. DYNAMIC_HISTOGRAM_H).

Reviewed By: csummersea

Differential Revision: D13115451

fbshipit-source-id: cef8c84c62922616ceea17effff7bdf8d67302a2
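The problem the summary describes is easy to illustrate. Below is a minimal before/after sketch of the convention; the renamed guards themselves are not visible in this file view, so the longer name is hypothetical, following the usual path-qualified style (this particular header sidesteps the issue entirely with #pragma once):

// Before: a short guard like DYNAMIC_HISTOGRAM_H can collide with any
// other project's dynamic_histogram.h found on the include path.
#ifndef DYNAMIC_HISTOGRAM_H
#define DYNAMIC_HISTOGRAM_H
// ... declarations ...
#endif // DYNAMIC_HISTOGRAM_H

// After: a longer, path-qualified guard (hypothetical name) is far less
// likely to collide.
#ifndef CAFFE2_QUANTIZATION_SERVER_DYNAMIC_HISTOGRAM_H_
#define CAFFE2_QUANTIZATION_SERVER_DYNAMIC_HISTOGRAM_H_
// ... declarations ...
#endif // CAFFE2_QUANTIZATION_SERVER_DYNAMIC_HISTOGRAM_H_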
34 lines
780 B
C++
#pragma once

#include "tanh.h"

namespace dnnlowp {

/**
 * sigmoid(x) = (tanh(x/2) + 1)/2
 * Quantized sigmoid is computed as tanh under the hood; we just use different
 * input/output quantization parameters.
 */
template <typename T>
class Sigmoid {
 public:
  // max_abs_err_ bounds the absolute error of the underlying tanh
  // approximation.
  Sigmoid(double max_abs_err_ = Tanh<T>::DEFAULT_MAX_ABS_ERR);

  // Applies sigmoid to a quantized input x; x is interpreted via the input
  // quantization parameters and the result via the output parameters.
  T Compute(T x) const;

  TensorQuantizationParams GetInputQuantizationParams() const {
    return in_qparams_;
  }
  TensorQuantizationParams GetOutputQuantizationParams() const {
    return out_qparams_;
  }

 private:
  const int num_in_bits_ = Tanh<T>::DEFAULT_NUM_IN_BITS;
  const int num_out_bits_ = Tanh<T>::DEFAULT_NUM_OUT_BITS;
  Tanh<T> tanh_; // does the actual computation
  TensorQuantizationParams in_qparams_, out_qparams_;
}; // class Sigmoid

} // namespace dnnlowp
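As a sanity check on the identity in the doc comment, here is a minimal standalone sketch in plain double precision. It uses std::tanh rather than the quantized Tanh<T> above, so it says nothing about dnnlowp's fixed-point behavior; it only verifies the math the class relies on:

#include <cassert>
#include <cmath>
#include <cstdio>

// Verifies sigmoid(x) == (tanh(x/2) + 1) / 2 over a small range.
int main() {
  for (double x = -6.0; x <= 6.0; x += 0.5) {
    const double sigmoid = 1.0 / (1.0 + std::exp(-x));
    const double via_tanh = (std::tanh(x / 2.0) + 1.0) / 2.0;
    assert(std::fabs(sigmoid - via_tanh) < 1e-12);
    std::printf("x=%+5.2f  sigmoid=%.6f  via_tanh=%.6f\n", x, sigmoid, via_tanh);
  }
  return 0;
}

In the quantized path, the expected flow (inferred from the interface, not shown in this header) is: quantize the real-valued input using GetInputQuantizationParams(), call Compute on each quantized element, and interpret the result using GetOutputQuantizationParams(); the Tanh<T> member handles the underlying computation.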