pytorch/caffe2/quantization/server/sigmoid.h
Jongsoo Park 3c2462cf24 use pragma once (#14163)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14163

Some of the names we were using to guard the header files were too short (e.g. DYNAMIC_HISTOGRAM_H), which risks collisions with guards in other code; see the sketch after this note.

Reviewed By: csummersea

Differential Revision: D13115451

fbshipit-source-id: cef8c84c62922616ceea17effff7bdf8d67302a2
2018-11-20 00:56:04 -08:00
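
For illustration, a minimal sketch of the pattern this commit replaces. The guard name is the example cited in the summary; the elided declarations are placeholders.

// Before: a macro include guard with a collision-prone short name.
#ifndef DYNAMIC_HISTOGRAM_H
#define DYNAMIC_HISTOGRAM_H
// ... declarations ...
#endif // DYNAMIC_HISTOGRAM_H

// After: #pragma once keys on the file's identity rather than a macro
// name, so there is no name to collide.
#pragma once
// ... declarations ...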

34 lines
780 B
C++

#pragma once

#include "tanh.h"

namespace dnnlowp {

/**
 * sigmoid(x) = (tanh(x/2) + 1)/2
 *
 * Quantized sigmoid is computed as tanh under the hood; we just use different
 * input/output quantization parameters.
 */
template <typename T>
class Sigmoid {
 public:
  Sigmoid(double max_abs_err_ = Tanh<T>::DEFAULT_MAX_ABS_ERR);

  T Compute(T x) const;

  TensorQuantizationParams GetInputQuantizationParams() const {
    return in_qparams_;
  }
  TensorQuantizationParams GetOutputQuantizationParams() const {
    return out_qparams_;
  }

 private:
  const int num_in_bits_ = Tanh<T>::DEFAULT_NUM_IN_BITS;
  const int num_out_bits_ = Tanh<T>::DEFAULT_NUM_OUT_BITS;
  Tanh<T> tanh_;
  TensorQuantizationParams in_qparams_, out_qparams_;
}; // class Sigmoid

} // namespace dnnlowp
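
Below is a minimal usage sketch for the class above, not part of the header. The main() driver and the round-to-nearest quantize/dequantize arithmetic are illustrative assumptions, as is the TensorQuantizationParams field layout (scale, zero_point) borrowed from fbgemm/dnnlowp; the class itself fixes both sets of parameters, so callers must adopt them rather than supply their own.

// Hedged usage sketch: assumes TensorQuantizationParams exposes
// `scale` and `zero_point`; the conversion math here is illustrative,
// not a library helper.
#include <cmath>
#include <cstdint>
#include <iostream>

#include "sigmoid.h"

int main() {
  dnnlowp::Sigmoid<std::uint8_t> sigmoid; // default max_abs_err from Tanh<T>

  // The class dictates its own quantization parameters: inputs must be
  // quantized with in_qparams_, outputs interpreted with out_qparams_.
  const dnnlowp::TensorQuantizationParams in_q =
      sigmoid.GetInputQuantizationParams();
  const dnnlowp::TensorQuantizationParams out_q =
      sigmoid.GetOutputQuantizationParams();

  const float x = 1.0f;

  // Quantize x with the input parameters (round to nearest).
  const auto x_q = static_cast<std::uint8_t>(
      std::lrintf(x / in_q.scale) + in_q.zero_point);

  // Compute sigmoid in the quantized domain (a tanh evaluation under the hood).
  const std::uint8_t y_q = sigmoid.Compute(x_q);

  // Dequantize the result with the output parameters.
  const float y = (static_cast<int>(y_q) - out_q.zero_point) * out_q.scale;
  std::cout << "sigmoid(" << x << ") ~= " << y << "\n"; // expect roughly 0.73
}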