Minor fixups and linting

torzdf 2024-04-03 15:14:32 +01:00
parent 983901466f
commit 70c064ca7d
32 changed files with 347 additions and 338 deletions
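
Most of the churn below is one mechanical lint fix repeated across the plugin `_defaults.py` files: `dict(...)` constructor calls rewritten as dict literals, the pattern pylint reports as `use-dict-literal` (R1735). A minimal sketch of the before/after, mirroring the option fields used in the diffs below:

    # Flagged by pylint R1735 (use-dict-literal): building a mapping by
    # calling dict() with keyword arguments
    option = dict(default=8, datatype=int, group="settings")

    # Preferred: a literal. It skips the name lookup and call, and string
    # keys may contain characters that are illegal in keyword arguments,
    # e.g. the "batch-size" keys used throughout these files
    option = {"default": 8, "datatype": int, "group": "settings"}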

View File

@@ -350,7 +350,6 @@ class GraphDisplay(DisplayOptionalPage):  # pylint:disable=too-many-ancestors
             self.after(1000, self.display_item_process)
             return

-        logger.debug("Adding graph")
         existing = list(self.subnotebook_get_titles_ids().keys())
         loss_keys = self.display_item.get_loss_keys(Session.session_ids[-1])
@@ -367,6 +366,7 @@ class GraphDisplay(DisplayOptionalPage):  # pylint:disable=too-many-ancestors
             tabname = loss_key.replace("_", " ").title()
             if tabname in existing:
                 continue
+            logger.debug("Adding graph '%s'", tabname)
             display_keys = [key for key in loss_keys if key.startswith(loss_key)]
             data = Calculations(session_id=Session.session_ids[-1],

View File

@@ -129,9 +129,9 @@ class CliOptions():
                 helptext=opt["help"],
                 track_modified=True,
                 command=command)
-            gui_options[title] = dict(cpanel_option=cpanel_option,
-                                      opts=opt["opts"],
-                                      nargs=opt.get("nargs", None))
+            gui_options[title] = {"cpanel_option": cpanel_option,
+                                  "opts": opt["opts"],
+                                  "nargs": opt.get("nargs", None)}
         logger.trace("Processed: %s", gui_options)
         return gui_options

View File

@@ -90,8 +90,8 @@ class _GuiSession():
     def _selected_to_choices(self):
         """ dict: The selected value and valid choices for multi-option, radio or combo options.
         """
-        valid_choices = {cmd: {opt: dict(choices=val["cpanel_option"].choices,
-                                         is_multi=val["cpanel_option"].is_multi_option)
+        valid_choices = {cmd: {opt: {"choices": val["cpanel_option"].choices,
+                                     "is_multi": val["cpanel_option"].is_multi_option}
                                for opt, val in data.items()
                                if isinstance(val, dict) and "cpanel_option" in val
                                and val["cpanel_option"].choices is not None}
@@ -600,9 +600,9 @@ class Tasks(_GuiSession):
             The tab that pertains to the currently active task
         """
-        self._tasks[command] = dict(filename=self._filename,
-                                    options=self._options,
-                                    is_project=self._is_project)
+        self._tasks[command] = {"filename": self._filename,
+                                "options": self._options,
+                                "is_project": self._is_project}

     def clear_tasks(self):
         """ Clears all of the stored tasks.
@@ -629,7 +629,7 @@ class Tasks(_GuiSession):
         options: dict
             The options for this task loaded from the project
         """
-        self._tasks[command] = dict(filename=filename, options=options, is_project=True)
+        self._tasks[command] = {"filename": filename, "options": options, "is_project": True}

     def _set_active_task(self, command=None):
         """ Set the active :attr:`_filename` and :attr:`_options` to currently selected tab's

View File

@@ -370,7 +370,7 @@ class _Widgets():
                     ("disabled", images[f"img_{lookup}_disabled"]),
                     ("pressed !disabled", images[f"img_{lookup}_active"]),
                     ("active !disabled", images[f"img_{lookup}_active"]))
-            kwargs = dict(border=1, sticky="ns") if element == "thumb" else {}
+            kwargs = {"border": 1, "sticky": "ns"} if element == "thumb" else {}
             self._style.element_create(*args, **kwargs)

         # Get a configurable trough
@@ -487,7 +487,7 @@ class _TkImage():
         crop_size = (square_size // 16) * 16
         draw_rows = int(6 * crop_size / 16)
         start_row = dimensions[1] // 2 - draw_rows // 2
-        initial_indent = (2 * (crop_size // 16) + (dimensions[0] - crop_size) // 2)
+        initial_indent = 2 * (crop_size // 16) + (dimensions[0] - crop_size) // 2
         retval = np.zeros((dimensions[1], dimensions[0]), dtype="uint8")

         for i in range(start_row, start_row + draw_rows):

View File

@@ -23,7 +23,7 @@ from tqdm import tqdm

 from lib.multithreading import MultiThread
 from lib.queue_manager import queue_manager, QueueEmpty
-from lib.utils import convert_to_secs, FaceswapError, _video_extensions, get_image_paths
+from lib.utils import convert_to_secs, FaceswapError, VIDEO_EXTENSIONS, get_image_paths

 if T.TYPE_CHECKING:
     from lib.align.alignments import PNGHeaderDict
@@ -1148,7 +1148,7 @@ class ImagesLoader(ImageIO):
         """
         if not isinstance(self.location, str) or os.path.isdir(self.location):
             retval = False
-        elif os.path.splitext(self.location)[1].lower() in _video_extensions:
+        elif os.path.splitext(self.location)[1].lower() in VIDEO_EXTENSIONS:
             retval = True
         else:
             raise FaceswapError("The input file '{}' is not a valid video".format(self.location))

View File

@@ -43,7 +43,7 @@ class KBHit:
             self.old_term = termios.tcgetattr(self.file_desc)

             # New terminal setting unbuffered
-            self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
+            self.new_term[3] = self.new_term[3] & ~termios.ICANON & ~termios.ECHO
             termios.tcsetattr(self.file_desc, termios.TCSAFLUSH, self.new_term)

             # Support normal-terminal reset at exit

View File

@@ -24,9 +24,9 @@ if T.TYPE_CHECKING:
     from http.client import HTTPResponse

 # Global variables
-_image_extensions = [  # pylint:disable=invalid-name
+IMAGE_EXTENSIONS = [  # pylint:disable=invalid-name
     ".bmp", ".jpeg", ".jpg", ".png", ".tif", ".tiff"]
-_video_extensions = [  # pylint:disable=invalid-name
+VIDEO_EXTENSIONS = [  # pylint:disable=invalid-name
     ".avi", ".flv", ".mkv", ".mov", ".mp4", ".mpeg", ".mpg", ".webm", ".wmv",
     ".ts", ".vob"]
 _TF_VERS: tuple[int, int] | None = None
@@ -249,7 +249,7 @@ def get_image_paths(directory: str, extension: str | None = None) -> list[str]:
     ['/path/to/directory/image1.jpg']
     """
     logger = logging.getLogger(__name__)
-    image_extensions = _image_extensions if extension is None else [extension]
+    image_extensions = IMAGE_EXTENSIONS if extension is None else [extension]
     dir_contents = []

     if not os.path.exists(directory):
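
This is the rename driving many of the import changes below: the module-level `_image_extensions` / `_video_extensions` lists become `IMAGE_EXTENSIONS` / `VIDEO_EXTENSIONS`, matching the PEP 8 convention, enforced by pylint's `invalid-name` (C0103) check, that module-level constants are UPPER_CASE. A small illustrative sketch with an invented constant, not from the diff:

    # pylint C0103 reads a module-level assignment that is never rebound as
    # a constant and expects UPPER_CASE naming:
    _default_colours = ["red", "green"]  # lower_case: flagged unless disabled
    DEFAULT_COLOURS = ["red", "green"]   # conforms; dropping the leading
                                         # underscore also marks it public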

View File

@@ -13,7 +13,7 @@ from plugins.convert._config import Config

 logger = logging.getLogger(__name__)

-class Mask():  # pylint:disable=too-few-public-methods
+class Mask():
     """ Manipulations to perform to the mask that is to be applied to the output of the Faceswap
     model.

View File

@@ -46,6 +46,7 @@ class Align(Aligner):
         super().__init__(git_model_id=git_model_id, model_filename=model_filename, **kwargs)
         self.model: cv2.dnn.Net
+        self.model_path: str
         self.name = "cv2-DNN Aligner"
         self.input_size = 128
         self.color_format = "RGB"
@@ -56,8 +57,8 @@ class Align(Aligner):
     def init_model(self) -> None:
         """ Initialize CV2 DNN Detector Model"""
-        self.model = cv2.dnn.readNetFromTensorflow(self.model_path)  # pylint:disable=no-member
-        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # pylint:disable=no-member
+        self.model = cv2.dnn.readNetFromTensorflow(self.model_path)
+        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

     def faces_to_feed(self, faces: np.ndarray) -> np.ndarray:
         """ Convert a batch of face images from UINT8 (0-255) to fp32 (0.0-255.0)
@@ -136,7 +137,7 @@ class Align(Aligner):
         offsets: list
             List of offsets for the faces
         """
-        logger.trace("Aligning image around center")  # type:ignore
+        logger.trace("Aligning image around center")  # type:ignore[attr-defined]
         sizes = (self.input_size, self.input_size)
         rois = []
         faces = []
@@ -247,7 +248,7 @@ class Align(Aligner):
         pad_t = 1 - box[1] if box[1] < 0 else 0
         pad_r = box[2] - width if box[2] > width else 0
         pad_b = box[3] - height if box[3] > height else 0
-        logger.trace("Padding: (l: %s, t: %s, r: %s, b: %s)",  # type:ignore
+        logger.trace("Padding: (l: %s, t: %s, r: %s, b: %s)",  # type:ignore[attr-defined]
                      pad_l, pad_t, pad_r, pad_b)
         padded_image = cv2.copyMakeBorder(image.copy(),
                                           pad_t,
@@ -257,7 +258,8 @@ class Align(Aligner):
                                           cv2.BORDER_CONSTANT,
                                           value=(0, 0, 0))
         offsets = (pad_l - pad_r, pad_t - pad_b)
-        logger.trace("image_shape: %s, Padded shape: %s, box: %s, offsets: %s",  # type:ignore
+        logger.trace("image_shape: %s, Padded shape: %s, box: %s, "  # type:ignore[attr-defined]
+                     "offsets: %s",
                      image.shape, padded_image.shape, box, offsets)
         return padded_image, offsets
@@ -311,4 +313,4 @@ class Align(Aligner):
             points[:, 1] += (roi[1] - offset[1])
             landmarks.append(points)
         batch.landmarks = np.array(landmarks)
-        logger.trace("Predicted Landmarks: %s", batch.landmarks)  # type:ignore
+        logger.trace("Predicted Landmarks: %s", batch.landmarks)  # type:ignore[attr-defined]

View File

@@ -50,19 +50,19 @@ _HELPTEXT = (

 _DEFAULTS = {
-    "batch-size": dict(
-        default=12,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, "
-             "but setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
-             "accomodate then this will automatically be lowered."
-             "\n\tAMD users: A batchsize of 8 requires about 4 GB vram.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    )
+    "batch-size": {
+        "default": 12,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered."
+                "\n\tAMD users: A batchsize of 8 requires about 4 GB vram.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    }
 }

View File

@@ -26,14 +26,14 @@ class Detect(Detector):
     def init_model(self) -> None:
         """ Initialize CV2 DNN Detector Model"""
         assert isinstance(self.model_path, list)
-        self.model = cv2.dnn.readNetFromCaffe(self.model_path[1],  # pylint:disable=no-member
-                                              self.model_path[0])
-        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # pylint:disable=no-member
+        self.model = cv2.dnn.readNetFromCaffe(self.model_path[1],
+                                              self.model_path[0])
+        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

     def process_input(self, batch: BatchType) -> None:
         """ Compile the detection image(s) for prediction """
         assert isinstance(batch, DetectorBatch)
-        batch.feed = cv2.dnn.blobFromImages(batch.image,  # pylint:disable=no-member
+        batch.feed = cv2.dnn.blobFromImages(batch.image,
                                             scalefactor=1.0,
                                             size=(self.input_size, self.input_size),
                                             mean=[104, 117, 123],
@@ -53,13 +53,13 @@ class Detect(Detector):
         for i in range(predictions.shape[2]):
             confidence = predictions[0, 0, i, 2]
             if confidence >= self.confidence:
-                logger.trace("Accepting due to confidence %s >= %s",  # type:ignore
+                logger.trace("Accepting due to confidence %s >= %s",  # type:ignore[attr-defined]
                              confidence, self.confidence)
                 faces.append([(predictions[0, 0, i, 3] * self.input_size),
                               (predictions[0, 0, i, 4] * self.input_size),
                               (predictions[0, 0, i, 5] * self.input_size),
                               (predictions[0, 0, i, 6] * self.input_size)])
-        logger.trace("faces: %s", faces)  # type:ignore
+        logger.trace("faces: %s", faces)  # type:ignore[attr-defined]
         return np.array(faces)[None, ...]

     def process_output(self, batch: BatchType) -> None:
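
The dropped `# pylint:disable=no-member` comments on the `cv2.dnn` calls (here and in the aligner above) suggest pylint can now resolve those attributes, likely because current opencv-python wheels ship stub information, so the suppressions had become dead weight (and would themselves be reported if `useless-suppression` is enabled). Illustrative only, with invented model file names:

    import cv2

    # Previously annotated with  # pylint:disable=no-member  because pylint
    # could not introspect the compiled cv2 module; with stubs available the
    # plain calls lint cleanly
    net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "weights.caffemodel")
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)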

View File

@@ -50,17 +50,17 @@ _HELPTEXT = (
 )

-_DEFAULTS = dict(
-    confidence=dict(
-        default=50,
-        info="The confidence level at which the detector has succesfully found a face.\nHigher "
-             "levels will be more discriminating, lower levels will have more false positives.",
-        datatype=int,
-        rounding=5,
-        min_max=(25, 100),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    ),
-)
+_DEFAULTS = {
+    "confidence": {
+        "default": 50,
+        "info": "The confidence level at which the detector has succesfully found a face.\nHigher "
+                "levels will be more discriminating, lower levels will have more false positives.",
+        "datatype": int,
+        "rounding": 5,
+        "min_max": (25, 100),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    },
+}

View File

@@ -51,82 +51,83 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "minsize": dict(
-        default=20,
-        info="The minimum size of a face (in pixels) to be accepted as a positive match.\nLower "
-             "values use significantly more VRAM and will detect more false positives.",
-        datatype=int,
-        rounding=10,
-        min_max=(20, 1000),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    ),
-    "scalefactor": dict(
-        default=0.709,
-        info="The scale factor for the image pyramid.",
-        datatype=float,
-        rounding=3,
-        min_max=(0.1, 0.9),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    ),
-    "batch-size": dict(
-        default=8,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
-             "then this will automatically be lowered.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    ),
-    "cpu": dict(
-        default=True,
-        info="MTCNN detector still runs fairly quickly on CPU on some setups. "
-             "Enable CPU mode here to use the CPU for this detector to save some VRAM at a speed "
-             "cost.",
-        datatype=bool,
-        group="settings"),
-    "threshold_1": dict(
-        default=0.6,
-        info="First stage threshold for face detection. This stage obtains face candidates.",
-        datatype=float,
-        rounding=2,
-        min_max=(0.1, 0.9),
-        choices=[],
-        group="threshold",
-        gui_radio=False,
-        fixed=True,
-    ),
-    "threshold_2": dict(
-        default=0.7,
-        info="Second stage threshold for face detection. This stage refines face candidates.",
-        datatype=float,
-        rounding=2,
-        min_max=(0.1, 0.9),
-        choices=[],
-        group="threshold",
-        gui_radio=False,
-        fixed=True,
-    ),
-    "threshold_3": dict(
-        default=0.7,
-        info="Third stage threshold for face detection. This stage further refines face "
-             "candidates.",
-        datatype=float,
-        rounding=2,
-        min_max=(0.1, 0.9),
-        choices=[],
-        group="threshold",
-        gui_radio=False,
-        fixed=True,
-    ),
+    "minsize": {
+        "default": 20,
+        "info": "The minimum size of a face (in pixels) to be accepted as a positive match."
+                "\nLower values use significantly more VRAM and will detect more false positives.",
+        "datatype": int,
+        "rounding": 10,
+        "min_max": (20, 1000),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    },
+    "scalefactor": {
+        "default": 0.709,
+        "info": "The scale factor for the image pyramid.",
+        "datatype": float,
+        "rounding": 3,
+        "min_max": (0.1, 0.9),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    },
+    "batch-size": {
+        "default": 8,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    },
+    "cpu": {
+        "default": True,
+        "info": "MTCNN detector still runs fairly quickly on CPU on some setups. "
+                "Enable CPU mode here to use the CPU for this detector to save some VRAM at a "
+                "speed cost.",
+        "datatype": bool,
+        "group": "settings"
+    },
+    "threshold_1": {
+        "default": 0.6,
+        "info": "First stage threshold for face detection. This stage obtains face candidates.",
+        "datatype": float,
+        "rounding": 2,
+        "min_max": (0.1, 0.9),
+        "choices": [],
+        "group": "threshold",
+        "gui_radio": False,
+        "fixed": True,
+    },
+    "threshold_2": {
+        "default": 0.7,
+        "info": "Second stage threshold for face detection. This stage refines face candidates.",
+        "datatype": float,
+        "rounding": 2,
+        "min_max": (0.1, 0.9),
+        "choices": [],
+        "group": "threshold",
+        "gui_radio": False,
+        "fixed": True,
+    },
+    "threshold_3": {
+        "default": 0.7,
+        "info": "Third stage threshold for face detection. This stage further refines face "
+                "candidates.",
+        "datatype": float,
+        "rounding": 2,
+        "min_max": (0.1, 0.9),
+        "choices": [],
+        "group": "threshold",
+        "gui_radio": False,
+        "fixed": True,
+    },
 }

View File

@@ -51,32 +51,32 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "confidence": dict(
-        default=70,
-        info="The confidence level at which the detector has succesfully found a face.\n"
-             "Higher levels will be more discriminating, lower levels will have more false "
-             "positives.",
-        datatype=int,
-        rounding=5,
-        min_max=(25, 100),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    ),
-    "batch-size": dict(
-        default=4,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, "
-             "but setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
-             "accomodate then this will automatically be lowered."
-             "\n\tAMD users: A batchsize of 8 requires about 2 GB vram.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    )
+    "confidence": {
+        "default": 70,
+        "info": "The confidence level at which the detector has succesfully found a face.\n"
+                "Higher levels will be more discriminating, lower levels will have more false "
+                "positives.",
+        "datatype": int,
+        "rounding": 5,
+        "min_max": (25, 100),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    },
+    "batch-size": {
+        "default": 4,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered."
+                "\n\tAMD users: A batchsize of 8 requires about 2 GB vram.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    }
 }

View File

@@ -50,56 +50,58 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "batch-size": dict(
-        default=8,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
-             "then this will automatically be lowered.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True),
-    "cpu": dict(
-        default=False,
-        info="BiseNet mask still runs fairly quickly on CPU on some setups. Enable "
-             "CPU mode here to use the CPU for this masker to save some VRAM at a speed cost.",
-        datatype=bool,
-        group="settings"),
-    "weights": dict(
-        default="faceswap",
-        info="The trained weights to use.\n"
-             "\n\tfaceswap - Weights trained on wildly varied Faceswap extracted data to better "
-             "handle varying conditions, obstructions, glasses and multiple targets within a "
-             "single extracted image."
-             "\n\toriginal - The original weights trained on the CelebAMask-HQ dataset.",
-        choices=["faceswap", "original"],
-        datatype=str,
-        group="settings",
-        gui_radio=True,
-    ),
-    "include_ears": dict(
-        default=False,
-        info="Whether to include ears within the face mask.",
-        datatype=bool,
-        group="settings"
-    ),
-    "include_hair": dict(
-        default=False,
-        info="Whether to include hair within the face mask.",
-        datatype=bool,
-        group="settings"
-    ),
-    "include_glasses": dict(
-        default=True,
-        info="Whether to include glasses within the face mask.\n\tFor 'original' weights "
-             "excluding glasses will mask out the lenses as well as the frames.\n\tFor 'faceswap' "
-             "weights, the model has been trained to mask out lenses if eyes cannot be seen (i.e. "
-             "dark sunglasses) or just the frames if the eyes can be seen. ",
-        datatype=bool,
-        group="settings"
-    ),
+    "batch-size": {
+        "default": 8,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True
+    },
+    "cpu": {
+        "default": False,
+        "info": "BiseNet mask still runs fairly quickly on CPU on some setups. Enable "
+                "CPU mode here to use the CPU for this masker to save some VRAM at a speed cost.",
+        "datatype": bool,
+        "group": "settings"
+    },
+    "weights": {
+        "default": "faceswap",
+        "info": "The trained weights to use.\n"
+                "\n\tfaceswap - Weights trained on wildly varied Faceswap extracted data to "
+                "better handle varying conditions, obstructions, glasses and multiple targets "
+                "within a single extracted image."
+                "\n\toriginal - The original weights trained on the CelebAMask-HQ dataset.",
+        "choices": ["faceswap", "original"],
+        "datatype": str,
+        "group": "settings",
+        "gui_radio": True,
+    },
+    "include_ears": {
+        "default": False,
+        "info": "Whether to include ears within the face mask.",
+        "datatype": bool,
+        "group": "settings"
+    },
+    "include_hair": {
+        "default": False,
+        "info": "Whether to include hair within the face mask.",
+        "datatype": bool,
+        "group": "settings"
+    },
+    "include_glasses": {
+        "default": True,
+        "info": "Whether to include glasses within the face mask.\n\tFor 'original' weights "
+                "excluding glasses will mask out the lenses as well as the frames.\n\tFor "
+                "'faceswap' weights, the model has been trained to mask out lenses if eyes cannot "
+                "be seen (i.e. dark sunglasses) or just the frames if the eyes can be seen.",
+        "datatype": bool,
+        "group": "settings"
+    },
 }

View File

@@ -42,9 +42,9 @@ class Mask(Masker):
         for mask, face in zip(feed, faces):
             parts = self.parse_parts(np.array(face.landmarks))
             for item in parts:
-                item = np.rint(np.concatenate(item)).astype("int32")
-                hull = cv2.convexHull(item)
-                cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA)
+                a_item = np.rint(np.concatenate(item)).astype("int32")
+                hull = cv2.convexHull(a_item)
+                cv2.fillConvexPoly(mask, hull, [1.0], lineType=cv2.LINE_AA)
         return feed

     def process_output(self, batch: BatchType) -> None:
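
Two separate checker complaints likely motivate this hunk (repeated verbatim in the extended mask plugin below). Re-binding the loop variable `item` to a value of a different type is legal Python but draws an "Incompatible types in assignment" error from mypy, hence the fresh name `a_item`; and cv2's type stubs declare the fill colour as a scalar sequence, so the bare `1.0` becomes `[1.0]`. A runnable sketch with dummy landmark points standing in for the real `parse_parts` output:

    import cv2
    import numpy as np

    mask = np.zeros((8, 8), dtype="float32")
    parts = [[np.array([[0, 0]]), np.array([[4, 0]]), np.array([[2, 3]])]]

    for item in parts:
        # fresh name rather than re-binding "item" to an ndarray
        a_item = np.rint(np.concatenate(item)).astype("int32")
        hull = cv2.convexHull(a_item)
        # a one-element list satisfies the sequence-typed colour parameter
        cv2.fillConvexPoly(mask, hull, [1.0], lineType=cv2.LINE_AA)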

View File

@@ -51,29 +51,31 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "batch-size": dict(
-        default=8,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        group="settings"),
-    "centering": dict(
-        default="face",
-        info="Whether to create a dummy mask with face or head centering.",
-        choices=["face", "head"],
-        datatype=str,
-        group="settings",
-        gui_radio=True),
-    "fill": dict(
-        default=False,
-        info="Whether the mask should be filled (True) in which case the custom mask will be "
-             "created with the whole area masked in (i.e. you would need to manually edit out the "
-             "background) or unfilled (False) in which case you would need to manually edit in "
-             "the face.",
-        datatype=bool,
-        group="settings",
-        gui_radio=True,
-    ),
+    "batch-size": {
+        "default": 8,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "group": "settings"
+    },
+    "centering": {
+        "default": "face",
+        "info": "Whether to create a dummy mask with face or head centering.",
+        "choices": ["face", "head"],
+        "datatype": str,
+        "group": "settings",
+        "gui_radio": True
+    },
+    "fill": {
+        "default": False,
+        "info": "Whether the mask should be filled (True) in which case the custom mask will be "
+                "created with the whole area masked in (i.e. you would need to manually edit out "
+                "the background) or unfilled (False) in which case you would need to manually "
+                "edit in the face.",
+        "datatype": bool,
+        "group": "settings",
+        "gui_radio": True,
+    },
 }

View File

@@ -41,9 +41,9 @@ class Mask(Masker):
         for mask, face in zip(feed, faces):
             parts = self.parse_parts(np.array(face.landmarks))
             for item in parts:
-                item = np.rint(np.concatenate(item)).astype("int32")
-                hull = cv2.convexHull(item)
-                cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA)
+                a_item = np.rint(np.concatenate(item)).astype("int32")
+                hull = cv2.convexHull(a_item)
+                cv2.fillConvexPoly(mask, hull, [1.0], lineType=cv2.LINE_AA)
         return feed

     def process_output(self, batch: BatchType) -> None:

View File

@@ -51,18 +51,18 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "batch-size": dict(
-        default=8,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
-             "then this will automatically be lowered.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    )
+    "batch-size": {
+        "default": 8,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    }
 }

View File

@@ -50,18 +50,18 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "batch-size": dict(
-        default=6,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
-             "then this will automatically be lowered.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    )
+    "batch-size": {
+        "default": 6,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    }
 }

View File

@@ -51,18 +51,18 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "batch-size": dict(
-        default=2,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
-             "then this will automatically be lowered.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True,
-    )
+    "batch-size": {
+        "default": 2,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True,
+    }
 }

View File

@@ -51,23 +51,25 @@ _HELPTEXT = (
 _DEFAULTS = {
-    "batch-size": dict(
-        default=16,
-        info="The batch size to use. To a point, higher batch sizes equal better performance, but "
-             "setting it too high can harm performance.\n"
-             "\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
-             "then this will automatically be lowered.",
-        datatype=int,
-        rounding=1,
-        min_max=(1, 64),
-        choices=[],
-        group="settings",
-        gui_radio=False,
-        fixed=True),
-    "cpu": dict(
-        default=False,
-        info="VGG Face2 still runs fairly quickly on CPU on some setups. Enable "
-             "CPU mode here to use the CPU for this plugin to save some VRAM at a speed cost.",
-        datatype=bool,
-        group="settings"),
+    "batch-size": {
+        "default": 16,
+        "info": "The batch size to use. To a point, higher batch sizes equal better performance, "
+                "but setting it too high can harm performance.\n"
+                "\n\tNvidia users: If the batchsize is set higher than the your GPU can "
+                "accomodate then this will automatically be lowered.",
+        "datatype": int,
+        "rounding": 1,
+        "min_max": (1, 64),
+        "choices": [],
+        "group": "settings",
+        "gui_radio": False,
+        "fixed": True
+    },
+    "cpu": {
+        "default": False,
+        "info": "VGG Face2 still runs fairly quickly on CPU on some setups. Enable "
+                "CPU mode here to use the CPU for this plugin to save some VRAM at a speed cost.",
+        "datatype": bool,
+        "group": "settings"
+    },
 }

View File

@@ -16,7 +16,7 @@ from lib.align.alignments import PNGHeaderDict
 from lib.image import encode_image, generate_thumbnail, ImagesLoader, ImagesSaver, read_image_meta
 from lib.multithreading import MultiThread
-from lib.utils import get_folder, _image_extensions, _video_extensions
+from lib.utils import get_folder, IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
 from plugins.extract.pipeline import Extractor, ExtractMedia
 from scripts.fsmedia import Alignments, PostProcess, finalize
@@ -90,9 +90,9 @@ class Extract():  # pylint:disable=too-few-public-methods
         retval = [os.path.join(self._args.input_dir, fname)
                   for fname in os.listdir(self._args.input_dir)
                   if (os.path.isdir(os.path.join(self._args.input_dir, fname))  # folder images
-                      and any(os.path.splitext(iname)[-1].lower() in _image_extensions
+                      and any(os.path.splitext(iname)[-1].lower() in IMAGE_EXTENSIONS
                               for iname in os.listdir(os.path.join(self._args.input_dir, fname))))
-                  or os.path.splitext(fname)[-1].lower() in _video_extensions]  # video
+                  or os.path.splitext(fname)[-1].lower() in VIDEO_EXTENSIONS]  # video
         logger.debug("Input locations: %s", retval)
         return retval
@@ -268,7 +268,7 @@ class Filter():
         retval = [os.path.join(test_folder, fname)
                   for fname in os.listdir(test_folder)
-                  if os.path.splitext(fname)[-1].lower() in _image_extensions]
+                  if os.path.splitext(fname)[-1].lower() in IMAGE_EXTENSIONS]
         logger.info("Collected files from folder '%s': %s", test_folder,
                     [os.path.basename(f) for f in retval])
         return retval
@@ -299,7 +299,7 @@ class Filter():
         filt_files = [] if files is None else self._files_from_folder(files)
         for file in filt_files:
             if (not os.path.isfile(file) or
-                    os.path.splitext(file)[-1].lower() not in _image_extensions):
+                    os.path.splitext(file)[-1].lower() not in IMAGE_EXTENSIONS):
                 logger.warning("Filter file '%s' does not exist or is not an image file", file)
                 error = True
         retval.append(filt_files)

View File

@@ -19,7 +19,7 @@ import imageio

 from lib.align import Alignments as AlignmentsBase, get_centered_size
 from lib.image import count_frames, read_image
-from lib.utils import (camel_case_split, get_image_paths, _video_extensions)
+from lib.utils import (camel_case_split, get_image_paths, VIDEO_EXTENSIONS)

 if T.TYPE_CHECKING:
     from collections.abc import Generator
@@ -222,7 +222,7 @@ class Images():
             logger.error("Input location %s not found.", self._args.input_dir)
             sys.exit(1)
         if (os.path.isfile(self._args.input_dir) and
-                os.path.splitext(self._args.input_dir)[1].lower() in _video_extensions):
+                os.path.splitext(self._args.input_dir)[1].lower() in VIDEO_EXTENSIONS):
             logger.info("Input Video: %s", self._args.input_dir)
             retval = True
         else:
@@ -345,7 +345,7 @@ class Images():
         return frame

-class PostProcess():  # pylint:disable=too-few-public-methods
+class PostProcess():
     """ Optional pre/post processing tasks for convert and extract.

     Builds a pipeline of actions that have optionally been requested to be performed
@@ -428,7 +428,7 @@ class PostProcess():  # pylint:disable=too-few-public-methods
             action.process(extract_media)

-class PostProcessAction():  # pylint:disable=too-few-public-methods
+class PostProcessAction():
     """ Parent class for Post Processing Actions.

     Usable in Extract or Convert or both depending on context. Any post-processing actions should
@@ -465,7 +465,7 @@ class PostProcessAction():  # pylint:disable=too-few-public-methods
         raise NotImplementedError

-class DebugLandmarks(PostProcessAction):  # pylint:disable=too-few-public-methods
+class DebugLandmarks(PostProcessAction):
     """ Draw debug landmarks on face output. Extract Only """

     def __init__(self, *args, **kwargs) -> None:
         super().__init__(self, *args, **kwargs)

View File

@@ -173,7 +173,7 @@ class FaceswapGui(tk.Tk):
         return True

-class Gui():  # pylint:disable=too-few-public-methods
+class Gui():
     """ The GUI process. """
     def __init__(self, arguments):
         self.root = FaceswapGui(arguments.debug)

View File

@@ -18,7 +18,7 @@ from lib.keypress import KBHit
 from lib.multithreading import MultiThread, FSThread
 from lib.training import Preview, PreviewBuffer, TriggerType
 from lib.utils import (get_folder, get_image_paths,
-                       FaceswapError, _image_extensions)
+                       FaceswapError, IMAGE_EXTENSIONS)
 from plugins.plugin_loader import PluginLoader

 if T.TYPE_CHECKING:
@@ -31,7 +31,7 @@ if T.TYPE_CHECKING:

 logger = logging.getLogger(__name__)

-class Train():  # pylint:disable=too-few-public-methods
+class Train():
     """ The Faceswap Training Process.

     The training process is responsible for training a model on a set of source faces and a set of
@@ -174,7 +174,7 @@ class Train():  # pylint:disable=too-few-public-methods
                 continue  # Time-lapse folder is training folder
             filenames = [fname for fname in os.listdir(folder)
-                         if os.path.splitext(fname)[-1].lower() in _image_extensions]
+                         if os.path.splitext(fname)[-1].lower() in IMAGE_EXTENSIONS]
             if not filenames:
                 raise FaceswapError(f"The Timelapse path '{folder}' does not contain any valid "
                                     "images")

View File

@@ -8,7 +8,7 @@ import typing as T
 from argparse import Namespace
 from multiprocessing import Process

-from lib.utils import _video_extensions, FaceswapError
+from lib.utils import VIDEO_EXTENSIONS, FaceswapError

 from .media import AlignmentData
 from .jobs import Check, Sort, Spatial  # noqa pylint: disable=unused-import
 from .jobs_faces import FromFaces, RemoveFaces, Rename  # noqa pylint: disable=unused-import
@@ -117,7 +117,7 @@ class Alignments():  # pylint:disable=too-few-public-methods
         candidates = [os.path.join(self._args.frames_dir, fname)
                       for fname in os.listdir(self._args.frames_dir)
                       if os.path.isdir(os.path.join(self._args.frames_dir, fname))
-                      or os.path.splitext(fname)[-1].lower() in _video_extensions]
+                      or os.path.splitext(fname)[-1].lower() in VIDEO_EXTENSIONS]
         logger.debug("Frame candidates: %s", candidates)

         for candidate in candidates:
@@ -289,7 +289,7 @@ class _Alignments():  # pylint:disable=too-few-public-methods
         if os.path.isdir(frames) and os.path.exists(os.path.join(frames, fname)):
             return fname

-        if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in _video_extensions:
+        if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in VIDEO_EXTENSIONS:
             logger.error("Can't find a valid alignments file in location: %s", frames)
             sys.exit(1)

View File

@@ -17,7 +17,7 @@ from tqdm import tqdm
 from lib.align import Alignments, DetectedFace, update_legacy_png_header
 from lib.image import (count_frames, generate_thumbnail, ImagesLoader,
                        png_write_meta, read_image, read_image_meta_batch)
-from lib.utils import _image_extensions, _video_extensions, FaceswapError
+from lib.utils import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS, FaceswapError

 if T.TYPE_CHECKING:
     from collections.abc import Generator
@@ -134,7 +134,7 @@ class MediaLoader():
         if (loadtype == "Frames" and
                 os.path.isfile(self.folder) and
-                os.path.splitext(self.folder)[1].lower() in _video_extensions):
+                os.path.splitext(self.folder)[1].lower() in VIDEO_EXTENSIONS):
             logger.verbose("Video exists at: '%s'", self.folder)  # type: ignore
             retval = cv2.VideoCapture(self.folder)  # pylint:disable=no-member
             # TODO ImageIO single frame seek seems slow. Look into this
@@ -148,7 +148,7 @@ class MediaLoader():
     def valid_extension(filename) -> bool:
         """ bool: Check whether passed in file has a valid extension """
         extension = os.path.splitext(filename)[1]
-        retval = extension.lower() in _image_extensions
+        retval = extension.lower() in IMAGE_EXTENSIONS
         logger.trace("Filename has valid extension: '%s': %s", filename, retval)  # type: ignore
         return retval

View File

@@ -4,7 +4,7 @@ import gettext

 from lib.cli.args import FaceSwapArgs
 from lib.cli.actions import ContextFullPaths, FileFullPaths, Radio
-from lib.utils import _image_extensions
+from lib.utils import IMAGE_EXTENSIONS

 # LOCALES
@@ -100,7 +100,7 @@ class EffmpegArgs(FaceSwapArgs):
         argument_list.append(dict(
             opts=("-ef", "--extract-filetype"),
             action=Radio,
-            choices=_image_extensions,
+            choices=IMAGE_EXTENSIONS,
             dest="extract_ext",
             group=_("output"),
             default=".png",

View File

@@ -17,7 +17,7 @@ import imageio_ffmpeg as im_ffm
 from ffmpy import FFmpeg, FFRuntimeError

 # faceswap imports
-from lib.utils import _image_extensions, _video_extensions
+from lib.utils import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS

 logger = logging.getLogger(__name__)
@@ -27,10 +27,10 @@ class DataItem():
     A simple class used for storing the media data items and directories that
     Effmpeg uses for 'input', 'output' and 'ref_vid'.
     """
-    vid_ext = _video_extensions
+    vid_ext = VIDEO_EXTENSIONS
     # future option in effmpeg to use audio file for muxing
     audio_ext = ['.aiff', '.flac', '.mp3', '.wav']
-    img_ext = _image_extensions
+    img_ext = IMAGE_EXTENSIONS

     def __init__(self, path=None, name=None, item_type=None, ext=None,
                  fps=None):

View File

@@ -18,7 +18,7 @@ from lib.gui.control_helper import ControlPanel
 from lib.gui.utils import get_images, get_config, initialize_config, initialize_images
 from lib.image import SingleFrameLoader, read_image_meta
 from lib.multithreading import MultiThread
-from lib.utils import _video_extensions
+from lib.utils import VIDEO_EXTENSIONS
 from plugins.extract.pipeline import Extractor, ExtractMedia

 from .detected_faces import DetectedFaces
@@ -569,7 +569,7 @@ class TkGlobals():
         """
         if os.path.isdir(frames_location):
             retval = False
-        elif os.path.splitext(frames_location)[1].lower() in _video_extensions:
+        elif os.path.splitext(frames_location)[1].lower() in VIDEO_EXTENSIONS:
             retval = True
         else:
             logger.error("The input location '%s' is not valid", frames_location)

View File

@@ -10,7 +10,7 @@ from multiprocessing import Process

 from lib.align import Alignments
-from lib.utils import _video_extensions
+from lib.utils import VIDEO_EXTENSIONS
 from plugins.extract.pipeline import ExtractMedia

 from .loader import Loader
@@ -64,7 +64,7 @@ class Mask:  # pylint:disable=too-few-public-methods
         retval = [os.path.join(self._args.input, fname)
                   for fname in os.listdir(self._args.input)
                   if os.path.isdir(os.path.join(self._args.input, fname))
-                  or os.path.splitext(fname)[-1].lower() in _video_extensions]
+                  or os.path.splitext(fname)[-1].lower() in VIDEO_EXTENSIONS]
         logger.info("Batch mode selected. Processing locations: %s", retval)
         return retval