Mirror of https://github.com/zebrajr/faceswap.git (synced 2025-12-06 00:20:09 +01:00)
Commit 26dde3c19a (parent 629c02a61e)

Add CPU option to BiSeNet and MTCNN

- Add CPU option to KSession
- MTCNN optimizations
- Update docs for bisenet + mtcnn
@@ -39,14 +39,6 @@ _base module
    :undoc-members:
    :show-inheritance:
 
-detect._base module
--------------------
-
-.. automodule:: plugins.extract.detect._base
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 align._base module
 ------------------
@@ -55,18 +47,55 @@ align._base module
    :undoc-members:
    :show-inheritance:
 
-mask._base module
------------------
-
-.. automodule:: plugins.extract.mask._base
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 vgg\_face2\_keras module
 ------------------------
 
 .. automodule:: plugins.extract.recognition.vgg_face2_keras
    :members:
    :undoc-members:
    :show-inheritance:
+
+
+detect plugins package
+======================
+
+.. contents:: Contents
+   :local:
+
+detect._base module
+-------------------
+
+.. automodule:: plugins.extract.detect._base
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+detect.mtcnn module
+-------------------
+
+.. automodule:: plugins.extract.detect.mtcnn
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+
+mask plugins package
+====================
+
+.. contents:: Contents
+   :local:
+
+mask._base module
+-----------------
+
+.. automodule:: plugins.extract.mask._base
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mask.bisenet_fp module
+----------------------
+.. automodule:: plugins.extract.mask.bisenet_fp
+   :members:
+   :undoc-members:
+   :show-inheritance:
@@ -1,8 +1,9 @@
 #!/usr/bin python3
 """ Settings manager for Keras Backend """
 
+from contextlib import nullcontext
 import logging
-from typing import Callable, List, Optional, Union
+from typing import Callable, ContextManager, List, Optional, Union
 
 import numpy as np
 import tensorflow as tf
@@ -50,21 +51,25 @@ class KSession():
     exclude_gpus: list, optional
         A list of indices correlating to connected GPUs that Tensorflow should not use. Pass
         ``None`` to not exclude any GPUs. Default: ``None``
+    cpu_mode: bool, optional
+        ``True`` run the model on CPU. Default: ``False``
     """
     def __init__(self,
                  name: str,
                  model_path: str,
                  model_kwargs: Optional[dict] = None,
                  allow_growth: bool = False,
-                 exclude_gpus: Optional[List[int]] = None) -> None:
+                 exclude_gpus: Optional[List[int]] = None,
+                 cpu_mode: bool = False) -> None:
         logger.trace("Initializing: %s (name: %s, model_path: %s, "  # type:ignore
-                     "model_kwargs: %s, allow_growth: %s, exclude_gpus: %s)",
+                     "model_kwargs: %s, allow_growth: %s, exclude_gpus: %s, cpu_mode: %s)",
                      self.__class__.__name__, name, model_path, model_kwargs, allow_growth,
-                     exclude_gpus)
+                     exclude_gpus, cpu_mode)
         self._name = name
         self._backend = get_backend()
-        self._set_session(allow_growth, [] if exclude_gpus is None else exclude_gpus)
+        self._context = self._set_session(allow_growth,
+                                          [] if exclude_gpus is None else exclude_gpus,
+                                          cpu_mode)
         self._model_path = model_path
         self._model_kwargs = {} if not model_kwargs else model_kwargs
         self._model: Optional[Model] = None
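For orientation, a minimal sketch of how a caller would use the patched constructor. KSession and its keyword arguments come from the diff above; the session name and weights path here are illustrative only:

# Hypothetical caller; "detector.h5" is an illustrative weights file.
from lib.model.session import KSession

session = KSession("example-detector",
                   "detector.h5",
                   allow_growth=False,
                   exclude_gpus=None,
                   cpu_mode=True)  # the new flag: pin this model to CPU
session.load_model()               # loading now happens inside self._context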
@@ -94,9 +99,10 @@ class KSession():
             The predictions from the model
         """
         assert self._model is not None
-        if self._backend == "amd" and batch_size is not None:
-            return self._amd_predict_with_optimized_batchsizes(feed, batch_size)
-        return self._model.predict(feed, verbose=0, batch_size=batch_size)
+        with self._context:
+            if self._backend == "amd" and batch_size is not None:
+                return self._amd_predict_with_optimized_batchsizes(feed, batch_size)
+            return self._model.predict(feed, verbose=0, batch_size=batch_size)
 
     def _amd_predict_with_optimized_batchsizes(
             self,
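Wrapping every predict call in the stored context is harmless on the GPU path because the context defaults to contextlib.nullcontext, a reusable no-op manager. A quick self-contained check:

# nullcontext does nothing on enter/exit and can be re-entered, so
# wrapping the predict path in it leaves GPU behaviour unchanged.
from contextlib import nullcontext

context = nullcontext()
with context:
    total = sum(range(10))   # executes exactly as without the with-block
with context:                # safe to re-enter on every predict call
    total *= 2
print(total)  # 90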
@@ -133,7 +139,10 @@ class KSession():
             return np.concatenate(results)
         return [np.concatenate(x) for x in zip(*results)]
 
-    def _set_session(self, allow_growth: bool, exclude_gpus: list) -> None:
+    def _set_session(self,
+                     allow_growth: bool,
+                     exclude_gpus: list,
+                     cpu_mode: bool) -> ContextManager:
         """ Sets the backend session options.
 
         For AMD backend this does nothing.
@@ -152,13 +161,16 @@ class KSession():
         exclude_gpus: list
             A list of indices correlating to connected GPUs that Tensorflow should not use. Pass
             ``None`` to not exclude any GPUs
+        cpu_mode: bool
+            ``True`` run the model on CPU. Default: ``False``
         """
+        retval = nullcontext()
         if self._backend == "amd":
-            return
+            return retval
         if self._backend == "cpu":
             logger.verbose("Hiding GPUs from Tensorflow")  # type:ignore
             tf.config.set_visible_devices([], "GPU")
-            return
+            return retval
 
         gpus = tf.config.list_physical_devices('GPU')
         if exclude_gpus:
@@ -171,6 +183,10 @@ class KSession():
                 logger.info("Setting allow growth for GPU: %s", gpu)
                 tf.config.experimental.set_memory_growth(gpu, True)
 
+        if cpu_mode:
+            retval = tf.device("/device:cpu:0")
+        return retval
+
     def load_model(self) -> None:
         """ Loads a model.
 
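Taken together, _set_session now returns either a no-op context or a CPU device scope. A standalone sketch of the same pattern, assuming only TensorFlow 2.x in eager mode (make_context is an illustrative name, not part of the commit):

# Device-selection pattern sketched outside KSession: cpu_mode chooses
# between a no-op context and tf.device pinning ops to the first CPU.
from contextlib import nullcontext
import tensorflow as tf

def make_context(cpu_mode: bool):
    return tf.device("/device:cpu:0") if cpu_mode else nullcontext()

with make_context(cpu_mode=True):
    tensor = tf.constant([1.0, 2.0]) * 2.0  # this op is placed on the CPU
print(tensor.device)  # device string ends in "/device:CPU:0"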
@@ -183,9 +199,10 @@ class KSession():
         it thread safe.
         """
         logger.verbose("Initializing plugin model: %s", self._name)  # type:ignore
-        self._model = k_load_model(self._model_path, compile=False, **self._model_kwargs)
-        if self._backend != "amd":
-            self._model.make_predict_function()
+        with self._context:
+            self._model = k_load_model(self._model_path, compile=False, **self._model_kwargs)
+            if self._backend != "amd":
+                self._model.make_predict_function()
 
     def define_model(self, function: Callable) -> None:
         """ Defines a model from the given function.
@@ -199,7 +216,8 @@ class KSession():
             ``outputs``. The function that generates these results should be passed in, NOT the
             results themselves, as the function needs to be executed within the correct context.
         """
-        self._model = Model(*function())
+        with self._context:
+            self._model = Model(*function())
 
     def load_model_weights(self) -> None:
         """ Load model weights for a defined model inside the correct session.
@@ -213,9 +231,10 @@ class KSession():
         """
         logger.verbose("Initializing plugin model: %s", self._name)  # type:ignore
         assert self._model is not None
-        self._model.load_weights(self._model_path)
-        if self._backend != "amd":
-            self._model.make_predict_function()
+        with self._context:
+            self._model.load_weights(self._model_path)
+            if self._backend != "amd":
+                self._model.make_predict_function()
 
     def append_softmax_activation(self, layer_index: int = -1) -> None:
         """ Append a softmax activation layer to a model
@@ -231,5 +250,6 @@ class KSession():
         """
         logger.debug("Appending Softmax Activation to model: (layer_index: %s)", layer_index)
         assert self._model is not None
-        softmax = Activation("softmax", name="softmax")(self._model.layers[layer_index].output)
-        self._model = Model(inputs=self._model.input, outputs=[softmax])
+        with self._context:
+            softmax = Activation("softmax", name="softmax")(self._model.layers[layer_index].output)
+            self._model = Model(inputs=self._model.input, outputs=[softmax])
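The append_softmax_activation change only adds the context wrapper; the underlying technique is unchanged. A self-contained sketch of that technique with a toy Keras model (layer names and sizes are illustrative):

# Rebuilding a model with a softmax head, as append_softmax_activation does:
# take the output tensor of an existing layer, pass it through a softmax
# Activation, and wrap input/output in a new Model.
from tensorflow.keras.layers import Activation, Dense, Input
from tensorflow.keras.models import Model

inputs = Input(shape=(8,))
logits = Dense(4, name="logits")(inputs)        # raw, unnormalised scores
model = Model(inputs=inputs, outputs=[logits])

# layer_index=-1 equivalent: re-route the last layer's output through softmax
softmax = Activation("softmax", name="softmax")(model.layers[-1].output)
model = Model(inputs=model.input, outputs=[softmax])
model.summary()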
File diff suppressed because it is too large
@@ -88,6 +88,13 @@ _DEFAULTS = {
         gui_radio=False,
         fixed=True,
     ),
+    "cpu": dict(
+        default=True,
+        info="[Nvidia Only] MTCNN detector still runs fairly quickly on CPU on some setups. "
+             "Enable CPU mode here to use the CPU for this detector to save some VRAM at a speed "
+             "cost.",
+        datatype=bool,
+        group="settings"),
     "threshold_1": dict(
         default=0.6,
         info="First stage threshold for face detection. This stage obtains face candidates.",
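How the detector consumes this new default is not shown here, since the mtcnn.py diff above is suppressed; presumably it mirrors the BiSeNet plugin below and forwards self.config["cpu"] into KSession. A hypothetical sketch of that wiring (the class, names, and paths are illustrative, not the real mtcnn.py):

# Hypothetical wiring, not the actual mtcnn.py (that diff is suppressed):
# a detector plugin reading the new "cpu" default and passing it through.
from lib.model.session import KSession

class ExampleDetector:  # stand-in for the real MTCNN plugin class
    def __init__(self, config, exclude_gpus):
        self.config = config
        self._exclude_gpus = exclude_gpus
        self.model = None

    def init_model(self):
        self.model = KSession("MTCNN",                      # illustrative name
                              "mtcnn_weights.h5",           # illustrative path
                              allow_growth=self.config["allow_growth"],
                              exclude_gpus=self._exclude_gpus,
                              cpu_mode=self.config["cpu"])  # the new default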
@@ -36,9 +36,9 @@ class Mask(Masker):
         self.name = "BiSeNet - Face Parsing"
         self.input_size = 512
         self.color_format = "RGB"
-        self.vram = 2304
-        self.vram_warnings = 256
-        self.vram_per_batch = 64
+        self.vram = 2304 if not self.config["cpu"] else 0
+        self.vram_warnings = 256 if not self.config["cpu"] else 0
+        self.vram_per_batch = 64 if not self.config["cpu"] else 0
         self.batchsize = self.config["batch-size"]
 
         self._segment_indices = self._get_segment_indices()
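Zeroing the vram figures when the cpu option is set means the extraction pipeline presumably budgets no GPU memory for this masker. The pattern in isolation, with an illustrative helper name:

# The conditional-VRAM pattern from the hunk above, in isolation: report
# zero GPU memory requirements whenever the plugin is pinned to CPU.
def vram_requirement(cpu: bool, gpu_mb: int) -> int:
    return 0 if cpu else gpu_mb

assert vram_requirement(cpu=True, gpu_mb=2304) == 0
assert vram_requirement(cpu=False, gpu_mb=2304) == 2304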
@@ -107,7 +107,8 @@ class Mask(Masker):
                                 self.config["allow_growth"],
                                 self._exclude_gpus,
                                 self.input_size,
-                                lbls)
+                                lbls,
+                                self.config["cpu"])
 
         placeholder = np.zeros((self.batchsize, self.input_size, self.input_size, 3),
                                dtype="float32")
@@ -535,12 +536,15 @@ class BiSeNet(KSession):
         The input size to the model
     num_classes: int
         The number of segmentation classes to create
+    cpu_mode: bool, optional
+        ``True`` run the model on CPU. Default: ``False``
     """
-    def __init__(self, model_path, allow_growth, exclude_gpus, input_size, num_classes):
+    def __init__(self, model_path, allow_growth, exclude_gpus, input_size, num_classes, cpu_mode):
         super().__init__("BiSeNet Face Parsing",
                          model_path,
                          allow_growth=allow_growth,
-                         exclude_gpus=exclude_gpus)
+                         exclude_gpus=exclude_gpus,
+                         cpu_mode=cpu_mode)
         self._input_size = input_size
         self._num_classes = num_classes
         self._cp = ContextPath()
@@ -63,6 +63,12 @@ _DEFAULTS = {
         group="settings",
         gui_radio=False,
         fixed=True),
+    "cpu": dict(
+        default=False,
+        info="[Nvidia Only] BiseNet mask still runs fairly quickly on CPU on some setups. Enable "
+             "CPU mode here to use the CPU for this masker to save some VRAM at a speed cost.",
+        datatype=bool,
+        group="settings"),
     "weights": dict(
         default="faceswap",
         info="The trained weights to use.\n"