pytorch/caffe2/quantization/server/sigmoid.h

#pragma once

#include "caffe2/quantization/server/tanh.h"

namespace dnnlowp {

/**
 * sigmoid(x) = (tanh(x/2) + 1)/2
 *
 * Quantized sigmoid is computed as tanh under the hood; we just use different
 * input/output quantization parameters.
 */
template <typename T>
class Sigmoid {
 public:
  // max_abs_err_ bounds the maximum absolute error of the underlying tanh
  // approximation.
  Sigmoid(double max_abs_err_ = Tanh<T>::DEFAULT_MAX_ABS_ERR);

  // Computes the quantized sigmoid of a quantized input value x.
  T Compute(T x) const;

  TensorQuantizationParams GetInputQuantizationParams() const {
    return in_qparams_;
  }
  TensorQuantizationParams GetOutputQuantizationParams() const {
    return out_qparams_;
  }

 private:
  // Bit widths used by the underlying tanh approximation.
  const int num_in_bits_ = Tanh<T>::DEFAULT_NUM_IN_BITS;
  const int num_out_bits_ = Tanh<T>::DEFAULT_NUM_OUT_BITS;
  Tanh<T> tanh_;
  TensorQuantizationParams in_qparams_, out_qparams_;
}; // class Sigmoid

} // namespace dnnlowp
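
Below is a minimal usage sketch, not part of the header, illustrating the intended flow: quantize a real-valued input with the sigmoid's input quantization parameters, call Compute, and dequantize the result with the output parameters. The `scale` and `zero_point` field names of `TensorQuantizationParams` and the clamping to the uint8 range are assumptions based on common dnnlowp conventions, not taken from this header.

// --- Usage sketch (illustrative, assumptions noted above) -------------------
#include "caffe2/quantization/server/sigmoid.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  dnnlowp::Sigmoid<uint8_t> sigmoid; // default max_abs_err from Tanh<uint8_t>

  const auto in_qparams = sigmoid.GetInputQuantizationParams();
  const auto out_qparams = sigmoid.GetOutputQuantizationParams();

  // Quantize a real-valued input into the sigmoid's expected input domain
  // (field names scale/zero_point are assumed).
  const float x = 1.5f;
  const int x_int = static_cast<int>(std::round(x / in_qparams.scale)) +
      in_qparams.zero_point;
  const uint8_t x_q = static_cast<uint8_t>(std::min(255, std::max(0, x_int)));

  // Quantized sigmoid; internally this is the tanh approximation with
  // remapped input/output quantization parameters.
  const uint8_t y_q = sigmoid.Compute(x_q);

  // Dequantize; expect roughly 1 / (1 + exp(-1.5)) ~= 0.8176.
  const float y =
      (static_cast<int>(y_q) - out_qparams.zero_point) * out_qparams.scale;
  std::cout << "sigmoid(1.5) ~= " << y << std::endl;
  return 0;
}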