Minor fixups and linting

torzdf 2024-04-03 15:14:32 +01:00
parent 983901466f
commit 70c064ca7d
32 changed files with 347 additions and 338 deletions


@@ -350,7 +350,6 @@ class GraphDisplay(DisplayOptionalPage): # pylint:disable=too-many-ancestors
self.after(1000, self.display_item_process)
return
logger.debug("Adding graph")
existing = list(self.subnotebook_get_titles_ids().keys())
loss_keys = self.display_item.get_loss_keys(Session.session_ids[-1])
@@ -367,6 +366,7 @@ class GraphDisplay(DisplayOptionalPage): # pylint:disable=too-many-ancestors
tabname = loss_key.replace("_", " ").title()
if tabname in existing:
continue
logger.debug("Adding graph '%s'", tabname)
display_keys = [key for key in loss_keys if key.startswith(loss_key)]
data = Calculations(session_id=Session.session_ids[-1],


@@ -129,9 +129,9 @@ class CliOptions():
helptext=opt["help"],
track_modified=True,
command=command)
gui_options[title] = dict(cpanel_option=cpanel_option,
opts=opt["opts"],
nargs=opt.get("nargs", None))
gui_options[title] = {"cpanel_option": cpanel_option,
"opts": opt["opts"],
"nargs": opt.get("nargs", None)}
logger.trace("Processed: %s", gui_options)
return gui_options
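A note on the pattern here, which recurs throughout this commit: replacing `dict(...)` calls with literal `{...}` syntax is what pylint's use-dict-literal check (R1735 in recent pylint releases) suggests, and the literal also skips a global name lookup and call. A minimal sketch:

```python
# Sketch of the conversion applied across this commit. Both forms build the
# same dict; the literal is the form pylint's use-dict-literal check prefers.
opts = ("-bs", "--batch-size")          # placeholder value for illustration
before = dict(opts=opts, nargs=None)    # flagged by use-dict-literal
after = {"opts": opts, "nargs": None}   # preferred literal form
assert before == after
```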


@@ -90,8 +90,8 @@ class _GuiSession():
def _selected_to_choices(self):
""" dict: The selected value and valid choices for multi-option, radio or combo options.
"""
valid_choices = {cmd: {opt: dict(choices=val["cpanel_option"].choices,
is_multi=val["cpanel_option"].is_multi_option)
valid_choices = {cmd: {opt: {"choices": val["cpanel_option"].choices,
"is_multi": val["cpanel_option"].is_multi_option}
for opt, val in data.items()
if isinstance(val, dict) and "cpanel_option" in val
and val["cpanel_option"].choices is not None}
@@ -600,9 +600,9 @@ class Tasks(_GuiSession):
The tab that pertains to the currently active task
"""
self._tasks[command] = dict(filename=self._filename,
options=self._options,
is_project=self._is_project)
self._tasks[command] = {"filename": self._filename,
"options": self._options,
"is_project": self._is_project}
def clear_tasks(self):
""" Clears all of the stored tasks.
@@ -629,7 +629,7 @@
options: dict
The options for this task loaded from the project
"""
self._tasks[command] = dict(filename=filename, options=options, is_project=True)
self._tasks[command] = {"filename": filename, "options": options, "is_project": True}
def _set_active_task(self, command=None):
""" Set the active :attr:`_filename` and :attr:`_options` to currently selected tab's


@@ -370,7 +370,7 @@ class _Widgets():
("disabled", images[f"img_{lookup}_disabled"]),
("pressed !disabled", images[f"img_{lookup}_active"]),
("active !disabled", images[f"img_{lookup}_active"]))
kwargs = dict(border=1, sticky="ns") if element == "thumb" else {}
kwargs = {"border": 1, "sticky": "ns"} if element == "thumb" else {}
self._style.element_create(*args, **kwargs)
# Get a configurable trough
@@ -487,7 +487,7 @@ class _TkImage():
crop_size = (square_size // 16) * 16
draw_rows = int(6 * crop_size / 16)
start_row = dimensions[1] // 2 - draw_rows // 2
initial_indent = (2 * (crop_size // 16) + (dimensions[0] - crop_size) // 2)
initial_indent = 2 * (crop_size // 16) + (dimensions[0] - crop_size) // 2
retval = np.zeros((dimensions[1], dimensions[0]), dtype="uint8")
for i in range(start_row, start_row + draw_rows):


@@ -23,7 +23,7 @@ from tqdm import tqdm
from lib.multithreading import MultiThread
from lib.queue_manager import queue_manager, QueueEmpty
from lib.utils import convert_to_secs, FaceswapError, _video_extensions, get_image_paths
from lib.utils import convert_to_secs, FaceswapError, VIDEO_EXTENSIONS, get_image_paths
if T.TYPE_CHECKING:
from lib.align.alignments import PNGHeaderDict
@@ -1148,7 +1148,7 @@ class ImagesLoader(ImageIO):
"""
if not isinstance(self.location, str) or os.path.isdir(self.location):
retval = False
elif os.path.splitext(self.location)[1].lower() in _video_extensions:
elif os.path.splitext(self.location)[1].lower() in VIDEO_EXTENSIONS:
retval = True
else:
raise FaceswapError("The input file '{}' is not a valid video".format(self.location))


@@ -43,7 +43,7 @@ class KBHit:
self.old_term = termios.tcgetattr(self.file_desc)
# New terminal setting unbuffered
self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
self.new_term[3] = self.new_term[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(self.file_desc, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
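The change above only drops redundant parentheses, but for context this is KBHit's raw-terminal setup: it clears the canonical-mode and echo bits in the local-modes field (index 3) of the termios attribute list. A minimal sketch of the same pattern (POSIX only, and assumes stdin is a real tty):

```python
# Hedged sketch of the KBHit termios pattern: disable line buffering and
# echo, and register a handler to restore the original settings at exit.
import atexit
import sys
import termios

fd = sys.stdin.fileno()
old_term = termios.tcgetattr(fd)
new_term = termios.tcgetattr(fd)
new_term[3] = new_term[3] & ~termios.ICANON & ~termios.ECHO  # lflags field
termios.tcsetattr(fd, termios.TCSAFLUSH, new_term)
atexit.register(termios.tcsetattr, fd, termios.TCSAFLUSH, old_term)
```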


@@ -24,9 +24,9 @@ if T.TYPE_CHECKING:
from http.client import HTTPResponse
# Global variables
_image_extensions = [ # pylint:disable=invalid-name
IMAGE_EXTENSIONS = [ # pylint:disable=invalid-name
".bmp", ".jpeg", ".jpg", ".png", ".tif", ".tiff"]
_video_extensions = [ # pylint:disable=invalid-name
VIDEO_EXTENSIONS = [ # pylint:disable=invalid-name
".avi", ".flv", ".mkv", ".mov", ".mp4", ".mpeg", ".mpg", ".webm", ".wmv",
".ts", ".vob"]
_TF_VERS: tuple[int, int] | None = None
@@ -249,7 +249,7 @@ def get_image_paths(directory: str, extension: str | None = None) -> list[str]:
['/path/to/directory/image1.jpg']
"""
logger = logging.getLogger(__name__)
image_extensions = _image_extensions if extension is None else [extension]
image_extensions = IMAGE_EXTENSIONS if extension is None else [extension]
dir_contents = []
if not os.path.exists(directory):


@@ -13,7 +13,7 @@ from plugins.convert._config import Config
logger = logging.getLogger(__name__)
class Mask(): # pylint:disable=too-few-public-methods
class Mask():
""" Manipulations to perform to the mask that is to be applied to the output of the Faceswap
model.


@@ -46,6 +46,7 @@ class Align(Aligner):
super().__init__(git_model_id=git_model_id, model_filename=model_filename, **kwargs)
self.model: cv2.dnn.Net
self.model_path: str
self.name = "cv2-DNN Aligner"
self.input_size = 128
self.color_format = "RGB"
@@ -56,8 +57,8 @@
def init_model(self) -> None:
""" Initialize CV2 DNN Detector Model"""
self.model = cv2.dnn.readNetFromTensorflow(self.model_path) # pylint:disable=no-member
self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) # pylint:disable=no-member
self.model = cv2.dnn.readNetFromTensorflow(self.model_path)
self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
def faces_to_feed(self, faces: np.ndarray) -> np.ndarray:
""" Convert a batch of face images from UINT8 (0-255) to fp32 (0.0-255.0)
@@ -136,7 +137,7 @@
offsets: list
List of offsets for the faces
"""
logger.trace("Aligning image around center") # type:ignore
logger.trace("Aligning image around center") # type:ignore[attr-defined]
sizes = (self.input_size, self.input_size)
rois = []
faces = []
@@ -247,7 +248,7 @@
pad_t = 1 - box[1] if box[1] < 0 else 0
pad_r = box[2] - width if box[2] > width else 0
pad_b = box[3] - height if box[3] > height else 0
logger.trace("Padding: (l: %s, t: %s, r: %s, b: %s)", # type:ignore
logger.trace("Padding: (l: %s, t: %s, r: %s, b: %s)", # type:ignore[attr-defined]
pad_l, pad_t, pad_r, pad_b)
padded_image = cv2.copyMakeBorder(image.copy(),
pad_t,
@ -257,7 +258,8 @@ class Align(Aligner):
cv2.BORDER_CONSTANT,
value=(0, 0, 0))
offsets = (pad_l - pad_r, pad_t - pad_b)
logger.trace("image_shape: %s, Padded shape: %s, box: %s, offsets: %s", # type:ignore
logger.trace("image_shape: %s, Padded shape: %s, box: %s, " # type:ignore[attr-defined]
"offsets: %s",
image.shape, padded_image.shape, box, offsets)
return padded_image, offsets
@@ -311,4 +313,4 @@
points[:, 1] += (roi[1] - offset[1])
landmarks.append(points)
batch.landmarks = np.array(landmarks)
logger.trace("Predicted Landmarks: %s", batch.landmarks) # type:ignore
logger.trace("Predicted Landmarks: %s", batch.landmarks) # type:ignore[attr-defined]


@@ -50,19 +50,19 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=12,
info="The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered."
"\n\tAMD users: A batchsize of 8 requires about 4 GB vram.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
)
"batch-size": {
"default": 12,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered."
"\n\tAMD users: A batchsize of 8 requires about 4 GB vram.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
}
}


@@ -26,14 +26,14 @@ class Detect(Detector):
def init_model(self) -> None:
""" Initialize CV2 DNN Detector Model"""
assert isinstance(self.model_path, list)
self.model = cv2.dnn.readNetFromCaffe(self.model_path[1], # pylint:disable=no-member
self.model = cv2.dnn.readNetFromCaffe(self.model_path[1],
self.model_path[0])
self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) # pylint:disable=no-member
self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
def process_input(self, batch: BatchType) -> None:
""" Compile the detection image(s) for prediction """
assert isinstance(batch, DetectorBatch)
batch.feed = cv2.dnn.blobFromImages(batch.image, # pylint:disable=no-member
batch.feed = cv2.dnn.blobFromImages(batch.image,
scalefactor=1.0,
size=(self.input_size, self.input_size),
mean=[104, 117, 123],
@@ -53,13 +53,13 @@
for i in range(predictions.shape[2]):
confidence = predictions[0, 0, i, 2]
if confidence >= self.confidence:
logger.trace("Accepting due to confidence %s >= %s", # type:ignore
logger.trace("Accepting due to confidence %s >= %s", # type:ignore[attr-defined]
confidence, self.confidence)
faces.append([(predictions[0, 0, i, 3] * self.input_size),
(predictions[0, 0, i, 4] * self.input_size),
(predictions[0, 0, i, 5] * self.input_size),
(predictions[0, 0, i, 6] * self.input_size)])
logger.trace("faces: %s", faces) # type:ignore
logger.trace("faces: %s", faces) # type:ignore[attr-defined]
return np.array(faces)[None, ...]
def process_output(self, batch: BatchType) -> None:


@@ -50,17 +50,17 @@ _HELPTEXT = (
)
_DEFAULTS = dict(
confidence=dict(
default=50,
info="The confidence level at which the detector has succesfully found a face.\nHigher "
"levels will be more discriminating, lower levels will have more false positives.",
datatype=int,
rounding=5,
min_max=(25, 100),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
),
)
_DEFAULTS = {
"confidence": {
"default": 50,
"info": "The confidence level at which the detector has succesfully found a face.\nHigher "
"levels will be more discriminating, lower levels will have more false positives.",
"datatype": int,
"rounding": 5,
"min_max": (25, 100),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
},
}


@@ -51,82 +51,83 @@ _HELPTEXT = (
_DEFAULTS = {
"minsize": dict(
default=20,
info="The minimum size of a face (in pixels) to be accepted as a positive match.\nLower "
"values use significantly more VRAM and will detect more false positives.",
datatype=int,
rounding=10,
min_max=(20, 1000),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
),
"scalefactor": dict(
default=0.709,
info="The scale factor for the image pyramid.",
datatype=float,
rounding=3,
min_max=(0.1, 0.9),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
),
"batch-size": dict(
default=8,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
"then this will automatically be lowered.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
),
"cpu": dict(
default=True,
info="MTCNN detector still runs fairly quickly on CPU on some setups. "
"Enable CPU mode here to use the CPU for this detector to save some VRAM at a speed "
"cost.",
datatype=bool,
group="settings"),
"threshold_1": dict(
default=0.6,
info="First stage threshold for face detection. This stage obtains face candidates.",
datatype=float,
rounding=2,
min_max=(0.1, 0.9),
choices=[],
group="threshold",
gui_radio=False,
fixed=True,
),
"threshold_2": dict(
default=0.7,
info="Second stage threshold for face detection. This stage refines face candidates.",
datatype=float,
rounding=2,
min_max=(0.1, 0.9),
choices=[],
group="threshold",
gui_radio=False,
fixed=True,
),
"threshold_3": dict(
default=0.7,
info="Third stage threshold for face detection. This stage further refines face "
"candidates.",
datatype=float,
rounding=2,
min_max=(0.1, 0.9),
choices=[],
group="threshold",
gui_radio=False,
fixed=True,
),
"minsize": {
"default": 20,
"info": "The minimum size of a face (in pixels) to be accepted as a positive match."
"\nLower values use significantly more VRAM and will detect more false positives.",
"datatype": int,
"rounding": 10,
"min_max": (20, 1000),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
},
"scalefactor": {
"default": 0.709,
"info": "The scale factor for the image pyramid.",
"datatype": float,
"rounding": 3,
"min_max": (0.1, 0.9),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
},
"batch-size": {
"default": 8,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
},
"cpu": {
"default": True,
"info": "MTCNN detector still runs fairly quickly on CPU on some setups. "
"Enable CPU mode here to use the CPU for this detector to save some VRAM at a "
"speed cost.",
"datatype": bool,
"group": "settings"
},
"threshold_1": {
"default": 0.6,
"info": "First stage threshold for face detection. This stage obtains face candidates.",
"datatype": float,
"rounding": 2,
"min_max": (0.1, 0.9),
"choices": [],
"group": "threshold",
"gui_radio": False,
"fixed": True,
},
"threshold_2": {
"default": 0.7,
"info": "Second stage threshold for face detection. This stage refines face candidates.",
"datatype": float,
"rounding": 2,
"min_max": (0.1, 0.9),
"choices": [],
"group": "threshold",
"gui_radio": False,
"fixed": True,
},
"threshold_3": {
"default": 0.7,
"info": "Third stage threshold for face detection. This stage further refines face "
"candidates.",
"datatype": float,
"rounding": 2,
"min_max": (0.1, 0.9),
"choices": [],
"group": "threshold",
"gui_radio": False,
"fixed": True,
},
}


@@ -51,32 +51,32 @@ _HELPTEXT = (
_DEFAULTS = {
"confidence": dict(
default=70,
info="The confidence level at which the detector has succesfully found a face.\n"
"Higher levels will be more discriminating, lower levels will have more false "
"positives.",
datatype=int,
rounding=5,
min_max=(25, 100),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
),
"batch-size": dict(
default=4,
info="The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered."
"\n\tAMD users: A batchsize of 8 requires about 2 GB vram.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
)
"confidence": {
"default": 70,
"info": "The confidence level at which the detector has succesfully found a face.\n"
"Higher levels will be more discriminating, lower levels will have more false "
"positives.",
"datatype": int,
"rounding": 5,
"min_max": (25, 100),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
},
"batch-size": {
"default": 4,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered."
"\n\tAMD users: A batchsize of 8 requires about 2 GB vram.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
}
}


@@ -50,56 +50,58 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=8,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
"then this will automatically be lowered.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True),
"cpu": dict(
default=False,
info="BiseNet mask still runs fairly quickly on CPU on some setups. Enable "
"CPU mode here to use the CPU for this masker to save some VRAM at a speed cost.",
datatype=bool,
group="settings"),
"weights": dict(
default="faceswap",
info="The trained weights to use.\n"
"\n\tfaceswap - Weights trained on wildly varied Faceswap extracted data to better "
"handle varying conditions, obstructions, glasses and multiple targets within a "
"single extracted image."
"\n\toriginal - The original weights trained on the CelebAMask-HQ dataset.",
choices=["faceswap", "original"],
datatype=str,
group="settings",
gui_radio=True,
),
"include_ears": dict(
default=False,
info="Whether to include ears within the face mask.",
datatype=bool,
group="settings"
),
"include_hair": dict(
default=False,
info="Whether to include hair within the face mask.",
datatype=bool,
group="settings"
),
"include_glasses": dict(
default=True,
info="Whether to include glasses within the face mask.\n\tFor 'original' weights "
"excluding glasses will mask out the lenses as well as the frames.\n\tFor 'faceswap' "
"weights, the model has been trained to mask out lenses if eyes cannot be seen (i.e. "
"dark sunglasses) or just the frames if the eyes can be seen. ",
datatype=bool,
group="settings"
),
"batch-size": {
"default": 8,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True
},
"cpu": {
"default": False,
"info": "BiseNet mask still runs fairly quickly on CPU on some setups. Enable "
"CPU mode here to use the CPU for this masker to save some VRAM at a speed cost.",
"datatype": bool,
"group": "settings"
},
"weights": {
"default": "faceswap",
"info": "The trained weights to use.\n"
"\n\tfaceswap - Weights trained on wildly varied Faceswap extracted data to "
"better handle varying conditions, obstructions, glasses and multiple targets "
"within a single extracted image."
"\n\toriginal - The original weights trained on the CelebAMask-HQ dataset.",
"choices": ["faceswap", "original"],
"datatype": str,
"group": "settings",
"gui_radio": True,
},
"include_ears": {
"default": False,
"info": "Whether to include ears within the face mask.",
"datatype": bool,
"group": "settings"
},
"include_hair": {
"default": False,
"info": "Whether to include hair within the face mask.",
"datatype": bool,
"group": "settings"
},
"include_glasses": {
"default": True,
"info": "Whether to include glasses within the face mask.\n\tFor 'original' weights "
"excluding glasses will mask out the lenses as well as the frames.\n\tFor "
"'faceswap' weights, the model has been trained to mask out lenses if eyes cannot "
"be seen (i.e. dark sunglasses) or just the frames if the eyes can be seen.",
"datatype": bool,
"group": "settings"
},
}


@@ -42,9 +42,9 @@ class Mask(Masker):
for mask, face in zip(feed, faces):
parts = self.parse_parts(np.array(face.landmarks))
for item in parts:
item = np.rint(np.concatenate(item)).astype("int32")
hull = cv2.convexHull(item)
cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA)
a_item = np.rint(np.concatenate(item)).astype("int32")
hull = cv2.convexHull(a_item)
cv2.fillConvexPoly(mask, hull, [1.0], lineType=cv2.LINE_AA)
return feed
def process_output(self, batch: BatchType) -> None:
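Two small fixes in this hunk: the loop variable `item` is no longer reassigned inside the loop (pylint's redefined-loop-name check, W2901, flags that), and the fill colour is passed as a one-element list, matching the sequence ("Scalar") type the OpenCV stubs expect. A minimal sketch of the same calls:

```python
# Sketch of the mask-fill pattern above: build a convex hull from points and
# rasterize it into a float mask; [1.0] is the colour as a Scalar sequence.
import cv2
import numpy as np

mask = np.zeros((64, 64), dtype="float32")
points = np.array([[10, 10], [54, 14], [40, 55]], dtype="int32")
hull = cv2.convexHull(points)  # convex hull of the landmark-like points
cv2.fillConvexPoly(mask, hull, [1.0], lineType=cv2.LINE_AA)
assert mask.max() > 0  # the hull interior is now filled
```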


@@ -51,29 +51,31 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=8,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.",
datatype=int,
rounding=1,
min_max=(1, 64),
group="settings"),
"centering": dict(
default="face",
info="Whether to create a dummy mask with face or head centering.",
choices=["face", "head"],
datatype=str,
group="settings",
gui_radio=True),
"fill": dict(
default=False,
info="Whether the mask should be filled (True) in which case the custom mask will be "
"created with the whole area masked in (i.e. you would need to manually edit out the "
"background) or unfilled (False) in which case you would need to manually edit in "
"the face.",
datatype=bool,
group="settings",
gui_radio=True,
),
"batch-size": {
"default": 8,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"group": "settings"
},
"centering": {
"default": "face",
"info": "Whether to create a dummy mask with face or head centering.",
"choices": ["face", "head"],
"datatype": str,
"group": "settings",
"gui_radio": True
},
"fill": {
"default": False,
"info": "Whether the mask should be filled (True) in which case the custom mask will be "
"created with the whole area masked in (i.e. you would need to manually edit out "
"the background) or unfilled (False) in which case you would need to manually "
"edit in the face.",
"datatype": bool,
"group": "settings",
"gui_radio": True,
},
}


@@ -41,9 +41,9 @@ class Mask(Masker):
for mask, face in zip(feed, faces):
parts = self.parse_parts(np.array(face.landmarks))
for item in parts:
item = np.rint(np.concatenate(item)).astype("int32")
hull = cv2.convexHull(item)
cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA)
a_item = np.rint(np.concatenate(item)).astype("int32")
hull = cv2.convexHull(a_item)
cv2.fillConvexPoly(mask, hull, [1.0], lineType=cv2.LINE_AA)
return feed
def process_output(self, batch: BatchType) -> None:


@@ -51,18 +51,18 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=8,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
"then this will automatically be lowered.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
)
"batch-size": {
"default": 8,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
}
}


@@ -50,18 +50,18 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=6,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
"then this will automatically be lowered.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
)
"batch-size": {
"default": 6,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
}
}


@@ -51,18 +51,18 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=2,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
"then this will automatically be lowered.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True,
)
"batch-size": {
"default": 2,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True,
}
}


@@ -51,23 +51,25 @@ _HELPTEXT = (
_DEFAULTS = {
"batch-size": dict(
default=16,
info="The batch size to use. To a point, higher batch sizes equal better performance, but "
"setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can accomodate "
"then this will automatically be lowered.",
datatype=int,
rounding=1,
min_max=(1, 64),
choices=[],
group="settings",
gui_radio=False,
fixed=True),
"cpu": dict(
default=False,
info="VGG Face2 still runs fairly quickly on CPU on some setups. Enable "
"CPU mode here to use the CPU for this plugin to save some VRAM at a speed cost.",
datatype=bool,
group="settings"),
"batch-size": {
"default": 16,
"info": "The batch size to use. To a point, higher batch sizes equal better performance, "
"but setting it too high can harm performance.\n"
"\n\tNvidia users: If the batchsize is set higher than the your GPU can "
"accomodate then this will automatically be lowered.",
"datatype": int,
"rounding": 1,
"min_max": (1, 64),
"choices": [],
"group": "settings",
"gui_radio": False,
"fixed": True
},
"cpu": {
"default": False,
"info": "VGG Face2 still runs fairly quickly on CPU on some setups. Enable "
"CPU mode here to use the CPU for this plugin to save some VRAM at a speed cost.",
"datatype": bool,
"group": "settings"
},
}


@@ -16,7 +16,7 @@ from lib.align.alignments import PNGHeaderDict
from lib.image import encode_image, generate_thumbnail, ImagesLoader, ImagesSaver, read_image_meta
from lib.multithreading import MultiThread
from lib.utils import get_folder, _image_extensions, _video_extensions
from lib.utils import get_folder, IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
from plugins.extract.pipeline import Extractor, ExtractMedia
from scripts.fsmedia import Alignments, PostProcess, finalize
@@ -90,9 +90,9 @@ class Extract(): # pylint:disable=too-few-public-methods
retval = [os.path.join(self._args.input_dir, fname)
for fname in os.listdir(self._args.input_dir)
if (os.path.isdir(os.path.join(self._args.input_dir, fname)) # folder images
and any(os.path.splitext(iname)[-1].lower() in _image_extensions
and any(os.path.splitext(iname)[-1].lower() in IMAGE_EXTENSIONS
for iname in os.listdir(os.path.join(self._args.input_dir, fname))))
or os.path.splitext(fname)[-1].lower() in _video_extensions] # video
or os.path.splitext(fname)[-1].lower() in VIDEO_EXTENSIONS] # video
logger.debug("Input locations: %s", retval)
return retval
@@ -268,7 +268,7 @@ class Filter():
retval = [os.path.join(test_folder, fname)
for fname in os.listdir(test_folder)
if os.path.splitext(fname)[-1].lower() in _image_extensions]
if os.path.splitext(fname)[-1].lower() in IMAGE_EXTENSIONS]
logger.info("Collected files from folder '%s': %s", test_folder,
[os.path.basename(f) for f in retval])
return retval
@@ -299,7 +299,7 @@ class Filter():
filt_files = [] if files is None else self._files_from_folder(files)
for file in filt_files:
if (not os.path.isfile(file) or
os.path.splitext(file)[-1].lower() not in _image_extensions):
os.path.splitext(file)[-1].lower() not in IMAGE_EXTENSIONS):
logger.warning("Filter file '%s' does not exist or is not an image file", file)
error = True
retval.append(filt_files)


@@ -19,7 +19,7 @@ import imageio
from lib.align import Alignments as AlignmentsBase, get_centered_size
from lib.image import count_frames, read_image
from lib.utils import (camel_case_split, get_image_paths, _video_extensions)
from lib.utils import (camel_case_split, get_image_paths, VIDEO_EXTENSIONS)
if T.TYPE_CHECKING:
from collections.abc import Generator
@@ -222,7 +222,7 @@ class Images():
logger.error("Input location %s not found.", self._args.input_dir)
sys.exit(1)
if (os.path.isfile(self._args.input_dir) and
os.path.splitext(self._args.input_dir)[1].lower() in _video_extensions):
os.path.splitext(self._args.input_dir)[1].lower() in VIDEO_EXTENSIONS):
logger.info("Input Video: %s", self._args.input_dir)
retval = True
else:
@@ -345,7 +345,7 @@ class Images():
return frame
class PostProcess(): # pylint:disable=too-few-public-methods
class PostProcess():
""" Optional pre/post processing tasks for convert and extract.
Builds a pipeline of actions that have optionally been requested to be performed
@@ -428,7 +428,7 @@ class PostProcess(): # pylint:disable=too-few-public-methods
action.process(extract_media)
class PostProcessAction(): # pylint:disable=too-few-public-methods
class PostProcessAction():
""" Parent class for Post Processing Actions.
Usable in Extract or Convert or both depending on context. Any post-processing actions should
@@ -465,7 +465,7 @@ class PostProcessAction(): # pylint:disable=too-few-public-methods
raise NotImplementedError
class DebugLandmarks(PostProcessAction): # pylint:disable=too-few-public-methods
class DebugLandmarks(PostProcessAction):
""" Draw debug landmarks on face output. Extract Only """
def __init__(self, *args, **kwargs) -> None:
super().__init__(self, *args, **kwargs)


@@ -173,7 +173,7 @@ class FaceswapGui(tk.Tk):
return True
class Gui(): # pylint:disable=too-few-public-methods
class Gui():
""" The GUI process. """
def __init__(self, arguments):
self.root = FaceswapGui(arguments.debug)


@@ -18,7 +18,7 @@ from lib.keypress import KBHit
from lib.multithreading import MultiThread, FSThread
from lib.training import Preview, PreviewBuffer, TriggerType
from lib.utils import (get_folder, get_image_paths,
FaceswapError, _image_extensions)
FaceswapError, IMAGE_EXTENSIONS)
from plugins.plugin_loader import PluginLoader
if T.TYPE_CHECKING:
@@ -31,7 +31,7 @@ if T.TYPE_CHECKING:
logger = logging.getLogger(__name__)
class Train(): # pylint:disable=too-few-public-methods
class Train():
""" The Faceswap Training Process.
The training process is responsible for training a model on a set of source faces and a set of
@@ -174,7 +174,7 @@ class Train(): # pylint:disable=too-few-public-methods
continue # Time-lapse folder is training folder
filenames = [fname for fname in os.listdir(folder)
if os.path.splitext(fname)[-1].lower() in _image_extensions]
if os.path.splitext(fname)[-1].lower() in IMAGE_EXTENSIONS]
if not filenames:
raise FaceswapError(f"The Timelapse path '{folder}' does not contain any valid "
"images")


@@ -8,7 +8,7 @@ import typing as T
from argparse import Namespace
from multiprocessing import Process
from lib.utils import _video_extensions, FaceswapError
from lib.utils import VIDEO_EXTENSIONS, FaceswapError
from .media import AlignmentData
from .jobs import Check, Sort, Spatial # noqa pylint: disable=unused-import
from .jobs_faces import FromFaces, RemoveFaces, Rename # noqa pylint: disable=unused-import
@@ -117,7 +117,7 @@ class Alignments(): # pylint:disable=too-few-public-methods
candidates = [os.path.join(self._args.frames_dir, fname)
for fname in os.listdir(self._args.frames_dir)
if os.path.isdir(os.path.join(self._args.frames_dir, fname))
or os.path.splitext(fname)[-1].lower() in _video_extensions]
or os.path.splitext(fname)[-1].lower() in VIDEO_EXTENSIONS]
logger.debug("Frame candidates: %s", candidates)
for candidate in candidates:
@@ -289,7 +289,7 @@ class _Alignments(): # pylint:disable=too-few-public-methods
if os.path.isdir(frames) and os.path.exists(os.path.join(frames, fname)):
return fname
if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in _video_extensions:
if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in VIDEO_EXTENSIONS:
logger.error("Can't find a valid alignments file in location: %s", frames)
sys.exit(1)


@@ -17,7 +17,7 @@ from tqdm import tqdm
from lib.align import Alignments, DetectedFace, update_legacy_png_header
from lib.image import (count_frames, generate_thumbnail, ImagesLoader,
png_write_meta, read_image, read_image_meta_batch)
from lib.utils import _image_extensions, _video_extensions, FaceswapError
from lib.utils import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS, FaceswapError
if T.TYPE_CHECKING:
from collections.abc import Generator
@@ -134,7 +134,7 @@ class MediaLoader():
if (loadtype == "Frames" and
os.path.isfile(self.folder) and
os.path.splitext(self.folder)[1].lower() in _video_extensions):
os.path.splitext(self.folder)[1].lower() in VIDEO_EXTENSIONS):
logger.verbose("Video exists at: '%s'", self.folder) # type: ignore
retval = cv2.VideoCapture(self.folder) # pylint:disable=no-member
# TODO ImageIO single frame seek seems slow. Look into this
@@ -148,7 +148,7 @@
def valid_extension(filename) -> bool:
""" bool: Check whether passed in file has a valid extension """
extension = os.path.splitext(filename)[1]
retval = extension.lower() in _image_extensions
retval = extension.lower() in IMAGE_EXTENSIONS
logger.trace("Filename has valid extension: '%s': %s", filename, retval) # type: ignore
return retval


@@ -4,7 +4,7 @@ import gettext
from lib.cli.args import FaceSwapArgs
from lib.cli.actions import ContextFullPaths, FileFullPaths, Radio
from lib.utils import _image_extensions
from lib.utils import IMAGE_EXTENSIONS
# LOCALES
@@ -100,7 +100,7 @@ class EffmpegArgs(FaceSwapArgs):
argument_list.append(dict(
opts=("-ef", "--extract-filetype"),
action=Radio,
choices=_image_extensions,
choices=IMAGE_EXTENSIONS,
dest="extract_ext",
group=_("output"),
default=".png",


@@ -17,7 +17,7 @@ import imageio_ffmpeg as im_ffm
from ffmpy import FFmpeg, FFRuntimeError
# faceswap imports
from lib.utils import _image_extensions, _video_extensions
from lib.utils import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
logger = logging.getLogger(__name__)
@@ -27,10 +27,10 @@ class DataItem():
A simple class used for storing the media data items and directories that
Effmpeg uses for 'input', 'output' and 'ref_vid'.
"""
vid_ext = _video_extensions
vid_ext = VIDEO_EXTENSIONS
# future option in effmpeg to use audio file for muxing
audio_ext = ['.aiff', '.flac', '.mp3', '.wav']
img_ext = _image_extensions
img_ext = IMAGE_EXTENSIONS
def __init__(self, path=None, name=None, item_type=None, ext=None,
fps=None):


@@ -18,7 +18,7 @@ from lib.gui.control_helper import ControlPanel
from lib.gui.utils import get_images, get_config, initialize_config, initialize_images
from lib.image import SingleFrameLoader, read_image_meta
from lib.multithreading import MultiThread
from lib.utils import _video_extensions
from lib.utils import VIDEO_EXTENSIONS
from plugins.extract.pipeline import Extractor, ExtractMedia
from .detected_faces import DetectedFaces
@@ -569,7 +569,7 @@ class TkGlobals():
"""
if os.path.isdir(frames_location):
retval = False
elif os.path.splitext(frames_location)[1].lower() in _video_extensions:
elif os.path.splitext(frames_location)[1].lower() in VIDEO_EXTENSIONS:
retval = True
else:
logger.error("The input location '%s' is not valid", frames_location)


@@ -10,7 +10,7 @@ from multiprocessing import Process
from lib.align import Alignments
from lib.utils import _video_extensions
from lib.utils import VIDEO_EXTENSIONS
from plugins.extract.pipeline import ExtractMedia
from .loader import Loader
@@ -64,7 +64,7 @@ class Mask: # pylint:disable=too-few-public-methods
retval = [os.path.join(self._args.input, fname)
for fname in os.listdir(self._args.input)
if os.path.isdir(os.path.join(self._args.input, fname))
or os.path.splitext(fname)[-1].lower() in _video_extensions]
or os.path.splitext(fname)[-1].lower() in VIDEO_EXTENSIONS]
logger.info("Batch mode selected. Processing locations: %s", retval)
return retval