mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/24299 As suggested in https://github.com/pytorch/pytorch/pull/24232, we will remove the activation observer for dynamic quantization path. ghstack-source-id: 88287094 Differential Revision: D16798590 fbshipit-source-id: 07a245d5584b5b15c6895d9b09deef4a0605073a
18 lines
586 B
Python
18 lines
586 B
Python
from __future__ import absolute_import, division, print_function, unicode_literals
|
|
from collections import namedtuple
|
|
from .observer import *
|
|
from .fake_quantize import *
|
|
|
|
# QConfig bundles the two observer factories used for static (post-training
# or QAT) quantization: one for weights, one for activations. Field order
# matters for positional construction: (weight, activation).
QConfig = namedtuple('QConfig', 'weight activation')
# Default static-quantization config: observer instances for both weights
# and activations (factories come from the .observer star import).
default_qconfig = QConfig(weight=default_weight_observer(),
                          activation=default_observer())
# Config for dynamic quantization: only a weight observer is stored, since
# activations are quantized on the fly at runtime and need no observer
# (see the removal of the activation observer for the dynamic path).
# FIX: the typename passed to namedtuple was 'QConfig', which made the
# generated class's __name__/repr indistinguishable from the static
# QConfig; it now matches the bound name 'QConfig_dynamic'. Field access
# (.weight), tuple behavior, and positional construction are unchanged.
QConfig_dynamic = namedtuple('QConfig_dynamic', ['weight'])
# Default dynamic-quantization config: weights only — no activation
# observer is needed on the dynamic path.
default_dynamic_qconfig = QConfig_dynamic(weight=default_weight_observer())
# Default quantization-aware-training config: fake-quantize modules (from
# the .fake_quantize star import) stand in for observers so gradients can
# flow through simulated quantization during training.
default_qat_qconfig = QConfig(weight=default_weight_fake_quant(),
                              activation=default_fake_quant())