Remove subpixel upscaling option (#1024)

torzdf 2020-05-13 13:50:48 +01:00 committed by GitHub
parent 92bc9af957
commit ac40b0f52f
4 changed files with 14 additions and 37 deletions


@@ -9,7 +9,7 @@ from keras.layers.convolutional import Conv2D
 from keras.layers.core import Activation
 from keras.initializers import he_uniform, VarianceScaling
 from .initializers import ICNR, ConvolutionAware
-from .layers import PixelShuffler, SubPixelUpscaling, ReflectionPadding2D
+from .layers import PixelShuffler, ReflectionPadding2D
 from .normalization import InstanceNormalization
 logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
@@ -26,10 +26,6 @@ class NNBlocks():
     Parameters
     ----------
-    use_subpixel: bool, Optional
-        ``True`` if sub-pixel up-scaling layer should be used instead of pixel shuffler for
-        up-scaling. This option is deprecated as sub-pixel up-scaling is Nvidia only, but is kept
-        for legacy models. Default: ``False``
     use_icnr_init: bool, Optional
         ``True`` if ICNR initialization should be used rather than the default. Default: ``False``
     use_convaware_init: bool, Optional
@@ -44,18 +40,16 @@ class NNBlocks():
         is being reloaded. Default: ``True``
     """
     def __init__(self,
-                 use_subpixel=False,
                  use_icnr_init=False,
                  use_convaware_init=False,
                  use_reflect_padding=False,
                  first_run=True):
-        logger.debug("Initializing %s: (use_subpixel: %s, use_icnr_init: %s, use_convaware_init: "
-                     "%s, use_reflect_padding: %s, first_run: %s)",
-                     self.__class__.__name__, use_subpixel, use_icnr_init, use_convaware_init,
+        logger.debug("Initializing %s: (use_icnr_init: %s, use_convaware_init: %s, "
+                     "use_reflect_padding: %s, first_run: %s)",
+                     self.__class__.__name__, use_icnr_init, use_convaware_init,
                      use_reflect_padding, first_run)
         self.names = dict()
         self.first_run = first_run
-        self.use_subpixel = use_subpixel
         self.use_icnr_init = use_icnr_init
         self.use_convaware_init = use_convaware_init
         self.use_reflect_padding = use_reflect_padding
@@ -311,11 +305,7 @@ class NNBlocks():
             var_x = InstanceNormalization(name="{}_instancenorm".format(name))(var_x)
         if not res_block_follows:
             var_x = LeakyReLU(0.1, name="{}_leakyrelu".format(name))(var_x)
-        if self.use_subpixel:
-            var_x = SubPixelUpscaling(name="{}_subpixel".format(name),
-                                      scale_factor=scale_factor)(var_x)
-        else:
-            var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
+        var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
         return var_x
 # <<< DFaker Model Blocks >>> #
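
Note: `PixelShuffler` and the removed `SubPixelUpscaling` both implement the sub-pixel rearrangement from https://arxiv.org/pdf/1609.05158.pdf, so dropping the option loses no capability. A minimal NumPy sketch of that operation (the `pixel_shuffle` helper is illustrative only and not part of the codebase; the Keras layers may order axes slightly differently):

```python
import numpy as np

def pixel_shuffle(tensor, scale=2):
    """Rearrange (batch, h, w, channels * scale**2) into
    (batch, h * scale, w * scale, channels) -- the depth-to-space
    shuffle that both layers perform."""
    batch, height, width, channels = tensor.shape
    out_chan = channels // (scale ** 2)
    # Split the channel axis into the two upscaling factors...
    tensor = tensor.reshape(batch, height, width, scale, scale, out_chan)
    # ...then interleave each factor with its matching spatial axis.
    tensor = tensor.transpose(0, 1, 3, 2, 4, 5)
    return tensor.reshape(batch, height * scale, width * scale, out_chan)

x = np.random.rand(2, 4, 4, 12).astype("float32")
assert pixel_shuffle(x, scale=2).shape == (2, 8, 8, 3)
```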


@@ -135,13 +135,6 @@ class Config(FaceswapConfig):
                  "\n\t Building the model will likely take several minutes as the calculations "
                  "for this initialization technique are expensive. This will only impact starting "
                  "a new model.")
-        self.add_item(
-            section=section, title="subpixel_upscaling", datatype=bool,
-            default=False, group="network",
-            info="Use subpixel upscaling rather than pixel shuffler. These techniques "
-                 "are both designed to produce better resolving upscaling than other "
-                 "methods. Each performs the same operations, but using different TF ops."
-                 "\n\t https://arxiv.org/pdf/1609.05158.pdf")
         self.add_item(
             section=section, title="reflect_padding", datatype=bool,
             default=False, group="network",


@@ -83,8 +83,7 @@ class ModelBase():
                            self.vram_savings.pingpong,
                            training_image_size)
-        self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"],
-                               use_icnr_init=self.config["icnr_init"],
+        self.blocks = NNBlocks(use_icnr_init=self.config["icnr_init"],
                                use_convaware_init=self.config["conv_aware_init"],
                                use_reflect_padding=self.config["reflect_padding"],
                                first_run=self.state.first_run)
@@ -377,9 +376,9 @@
         opt_kwargs = dict(lr=lr, beta_1=beta_1, beta_2=beta_2)
         if (self.config.get("clipnorm", False) and
                 keras.backend.backend() != "plaidml.keras.backend"):
-            # NB: Clipnorm is ballooning VRAM usage, which is not expected behavior
-            # and may be a bug in Keras/TF.
-            # PlaidML has a bug regarding the clipnorm parameter
+            # NB: Clip-norm is ballooning VRAM usage, which is not expected behavior
+            # and may be a bug in Keras/Tensorflow.
+            # PlaidML has a bug regarding the clip-norm parameter
             # See: https://github.com/plaidml/plaidml/issues/228
             # Workaround by simply removing it.
             # TODO: Remove this as soon it is fixed in PlaidML.
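
Note: the hunk above only rewords comments; the surrounding logic passes `clipnorm` to the optimizer on every backend except PlaidML. A simplified standalone sketch of that pattern (the `build_optimizer_kwargs` helper and the clip value are illustrative, assuming Keras 2.x):

```python
import keras

def build_optimizer_kwargs(config, lr=5e-5, beta_1=0.5, beta_2=0.99):
    """Assemble Adam kwargs, omitting clipnorm on the PlaidML backend,
    where it is broken (https://github.com/plaidml/plaidml/issues/228)."""
    opt_kwargs = dict(lr=lr, beta_1=beta_1, beta_2=beta_2)
    if (config.get("clipnorm", False) and
            keras.backend.backend() != "plaidml.keras.backend"):
        opt_kwargs["clipnorm"] = 1.0  # illustrative clip value
    return opt_kwargs

optimizer = keras.optimizers.Adam(**build_optimizer_kwargs({"clipnorm": True}))
```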
@@ -581,7 +580,6 @@
         self.state.inputs = {"face:0": [64, 64, 3]}
         self.state.training_size = 256
         self.state.config["coverage"] = 62.5
-        self.state.config["subpixel_upscaling"] = False
         self.state.config["reflect_padding"] = False
         self.state.config["mask_type"] = None
         self.state.config["mask_blur_kernel"] = 3
@@ -1014,7 +1012,7 @@
           set it to `mae`. Remove old `dssim_loss` item
         * masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is
-          not ``None`` otherwised it is set to ``False``.
+          not ``None`` otherwise it is set to ``False``.
         * masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask
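
Note: the docstring above summarizes the legacy-config migrations. A hedged sketch of what those rules amount to (a hypothetical standalone helper over a plain dict; the real implementation lives in the `State` class):

```python
def update_legacy_config(config):
    """Apply the migrations described in the docstring (sketch only)."""
    # losses: fold the removed `dssim_loss` flag into `loss_function`
    if "dssim_loss" in config:
        config["loss_function"] = "ssim" if config.pop("dssim_loss") else "mae"
    # masks: derive `learn_mask` from `mask_type` when it is absent
    if "learn_mask" not in config:
        config["learn_mask"] = config.get("mask_type") is not None
    # mask types: removed masks fall back to the `components` mask
    if config.get("mask_type") in ("dfl_full", "facehull"):
        config["mask_type"] = "components"
```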


@@ -15,14 +15,14 @@ from numpy.testing import assert_allclose
 from lib.model.nn_blocks import NNBlocks
 from lib.utils import get_backend
-_PARAMS = ["use_subpixel", "use_icnr_init", "use_convaware_init", "use_reflect_padding"]
+_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"]
 _VALUES = list(product([True, False], repeat=len(_PARAMS)))
 _IDS = ["{}[{}]".format("|".join([_PARAMS[idx] for idx, b in enumerate(v) if b]),
                         get_backend().upper()) for v in _VALUES]
 def block_test(layer_func, kwargs={}, input_shape=None):
-    """Test routine for a faceswaps neural network blocks.
+    """Test routine for faceswap neural network blocks.
     Tests are simple and are to ensure that the blocks compile on both tensorflow
     and plaidml backends
@@ -62,13 +62,9 @@ def block_test(layer_func, kwargs={}, input_shape=None):
 @pytest.mark.parametrize(_PARAMS, _VALUES, ids=_IDS)
-def test_blocks(use_subpixel, use_icnr_init, use_convaware_init, use_reflect_padding):
+def test_blocks(use_icnr_init, use_convaware_init, use_reflect_padding):
     """ Test for all blocks contained within the NNBlocks Class """
-    if get_backend() == "amd" and use_subpixel:
-        # Subpixel upscaling does not work on plaidml so skip this test
-        pytest.skip("Subpixel upscaling not supported in plaidML")
-    cls_ = NNBlocks(use_subpixel=use_subpixel,
-                    use_icnr_init=use_icnr_init,
+    cls_ = NNBlocks(use_icnr_init=use_icnr_init,
                     use_convaware_init=use_convaware_init,
                     use_reflect_padding=use_reflect_padding)
     block_test(cls_.conv2d, input_shape=(2, 5, 5, 128), kwargs=dict(filters=1024, kernel_size=3))
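
Note: with `use_subpixel` gone, the parameter grid shrinks from 2**4 == 16 to 2**3 == 8 combinations per backend. A self-contained sketch of the `itertools.product` parametrize idiom used above (simplified ids without the backend suffix; the test body is a placeholder):

```python
from itertools import product

import pytest

_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"]
# Every True/False combination of the flags: 2 ** 3 == 8 parameter sets.
_VALUES = list(product([True, False], repeat=len(_PARAMS)))
# Readable test ids listing only the enabled flags.
_IDS = ["|".join(name for name, flag in zip(_PARAMS, vals) if flag) or "none"
        for vals in _VALUES]

@pytest.mark.parametrize(_PARAMS, _VALUES, ids=_IDS)
def test_flag_combinations(use_icnr_init, use_convaware_init, use_reflect_padding):
    """ Each flag combination runs as its own test case. """
    assert all(isinstance(flag, bool)
               for flag in (use_icnr_init, use_convaware_init, use_reflect_padding))
```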