Remove DetectedFace object from plugins due to multiprocess error
parent 02935a9b42
commit fb76438615
@@ -201,7 +201,7 @@ Setting up Faceswap can seem a little intimidating to new users, but it isn't th
 At the time of writing Tensorflow (version 1.12) only supports Cuda up to version 9.0, but check https://www.tensorflow.org/install/gpu for the latest supported version. It is crucial that you download the correct version of Cuda.
-Download and install the the correct version of the Cuda Toolkit from: https://developer.nvidia.com/cuda-toolkit-archive
+Download and install the correct version of the Cuda Toolkit from: https://developer.nvidia.com/cuda-toolkit-archive
 NB: Make a note of the install folder as you'll need to access it in the next step.
@@ -214,8 +214,8 @@ Download cuDNN from https://developer.nvidia.com/cudnn. You will need to create
 At the bottom of the list of latest cuDNN release will be a link to "Archived cuDNN Releases". Select this and choose the latest version of cuDNN that supports the version of Cuda you installed and is less than or equal to the latest version that Tensorflow supports. (Eg Tensorflow 1.12 supports Cuda 9.0 and cuDNN 7.2. There is not an archived version of cuDNN 7.2 for Cuda 9.0, so select cuDNN version 7.1)
 - Open the zip file
-- Extract all of the files and folders into your Cuda folder:\
+- Extract all of the files and folders into your Cuda folder (It is likely to be located in `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA`):\

 ### CMake
 Install the latest stable release of CMake from https://cmake.org/download/. (Scroll down the page for Latest Releases and select the relevant Binary distribution installer for your OS).
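For readers following the install steps touched above, a quick way to confirm that Tensorflow actually picks up the Cuda/cuDNN install is a short check from Python. This is a hedged sketch, not part of the commit; it assumes a Tensorflow 1.x GPU build is already installed in the active environment.

```python
# Sketch: verify that Tensorflow (1.x) can see the CUDA/cuDNN install.
# Not part of this commit; assumes tensorflow-gpu 1.x is installed.
import tensorflow as tf
from tensorflow.python.client import device_lib

print("Built with CUDA:", tf.test.is_built_with_cuda())
print("GPU available:", tf.test.is_gpu_available())

# List the devices Tensorflow can see; a working install shows a /device:GPU:0 entry.
for device in device_lib.list_local_devices():
    print(device.name, device.device_type)
```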
@@ -8,12 +8,12 @@
 The plugin will receive a dict containing:
 {"filename": <filename of source frame>,
  "image": <source image>,
- "detected_faces": <list of DetectedFaces objects without landmarks>}
+ "detected_faces": <list of DlibRectangles>}

 For each source item, the plugin must pass a dict to finalize containing:
 {"filename": <filename of source frame>,
  "image": <source image>,
- "detected_faces": <list of final DetectedFaces objects>}
+ "detected_faces": <list of tuples containing (dlibRectangle, Landmarks)>}
 """

 import os
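The docstring change above is the heart of the commit: detectors now hand the aligner plain dlib rectangles, and the aligner hands back (rectangle, landmarks) tuples instead of DetectedFace objects. A minimal sketch of the two payloads under the new convention; only the dict keys come from the docstring, the values are made-up stand-ins.

```python
# Illustrative sketch of the new plugin payloads. Only the dict keys come from
# the docstring above; the values are invented for the example.
import numpy as np
import dlib

frame = np.zeros((256, 256, 3), dtype="uint8")           # stand-in for a video frame
rect = dlib.rectangle(10, 20, 110, 120)                   # left, top, right, bottom

# What an aligner plugin now receives on its "in" queue:
incoming = {"filename": "frame_0001.png",
            "image": frame,
            "detected_faces": [rect]}

# What it must hand to finalize(): one (dlib rectangle, landmarks) tuple per face,
# where landmarks is a list of (x, y) points.
landmarks = [(35, 60), (82, 61), (58, 90)]                # truncated for the example
outgoing = {"filename": incoming["filename"],
            "image": incoming["image"],
            "detected_faces": [(rect, landmarks)]}
```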
@@ -24,12 +24,9 @@ from lib.gpu_stats import GPUStats
 class Aligner():
     """ Landmarks Aligner Object """
-    def __init__(self, verbose=False, align_eyes=False, size=256, padding=48):
+    def __init__(self, verbose=False):
         self.verbose = verbose
-        self.size = size
-        self.padding = padding
         self.cachepath = os.path.join(os.path.dirname(__file__), ".cache")
-        self.align_eyes = align_eyes
         self.extract = Extract()
         self.init = None
@@ -79,20 +76,8 @@ class Aligner():
         if output == "EOF":
             self.queues["out"].put("EOF")
             return
-        self.align_faces(output)
         self.queues["out"].put((output))

-    def align_faces(self, output):
-        """ Align the faces """
-        detected_faces = output["detected_faces"]
-        image = output["image"]
-
-        for face in detected_faces:
-            face.load_aligned(image,
-                              size=self.size,
-                              padding=self.padding,
-                              align_eyes=self.align_eyes)
-
     # <<< MISC METHODS >>> #
     def get_vram_free(self):
         """ Return free and total VRAM on card with most VRAM free"""
@@ -22,7 +22,7 @@ class Align(Aligner):
         """ Initialization tasks to run prior to alignments """
         super().initialize(*args, **kwargs)
         print("Initializing Dlib Pose Predictor...")
-        self.model = dlib.shape_predictor(self.model_path)
+        self.model = dlib.shape_predictor(self.model_path) # pylint: disable=c-extension-no-member
         self.init.set()
         print("Initialized Dlib Pose Predictor.")
@@ -37,13 +37,15 @@ class Align(Aligner):
                 self.queues["out"].put(item)
                 break
             image = item["image"][:, :, ::-1].copy()
-            self.process_landmarks(image, item["detected_faces"])
+            item["detected_faces"] = self.process_landmarks(image, item["detected_faces"])
             self.finalize(item)
         self.finalize("EOF")

     def process_landmarks(self, image, detected_faces):
         """ Align image and process landmarks """
+        retval = list()
         for detected_face in detected_faces:
-            process_face = detected_face.to_dlib_rect()
-            pts = self.model(image, process_face).parts()
-            detected_face.landmarksXY = [(point.x, point.y) for point in pts]
+            pts = self.model(image, detected_face).parts()
+            landmarks = [(point.x, point.y) for point in pts]
+            retval.append((detected_face, landmarks))
+        return retval
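The `to_dlib_rect()` call goes away here because dlib's shape predictor accepts a `dlib.rectangle` directly; the plugin can run it on the incoming rectangle as-is and collect the points for the output tuple. A small hedged sketch of that call in isolation; the model path is a placeholder for dlib's standard 68-point predictor file, which is assumed to be present.

```python
# Minimal sketch of the dlib call the plugin now makes on the incoming rectangle.
# The model path is a placeholder; dlib's 68-point predictor file is assumed.
import dlib
import numpy as np

predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # placeholder path
image = np.zeros((256, 256, 3), dtype="uint8")            # stand-in frame
rect = dlib.rectangle(10, 20, 110, 120)

pts = predictor(image, rect).parts()                       # works on the rectangle itself
landmarks = [(point.x, point.y) for point in pts]          # second half of the output tuple
```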
@@ -66,7 +66,7 @@ class Align(Aligner):
                     self.queues["out"].put(item)
                     break
                 image = item["image"][:, :, ::-1].copy()
-                self.process_landmarks(image, item["detected_faces"])
+                item["detected_faces"] = self.process_landmarks(image, item["detected_faces"])
                 self.finalize(item)
             self.finalize("EOF")
         except:
@@ -76,13 +76,13 @@ class Align(Aligner):

     def process_landmarks(self, image, detected_faces):
         """ Align image and process landmarks """
+        retval = list()
         for detected_face in detected_faces:
-            process_face = detected_face.to_dlib_rect()
-            center, scale = self.get_center_scale(process_face)
+            center, scale = self.get_center_scale(detected_face)
             aligned_image = self.align_image(image, center, scale)
-            detected_face.landmarksXY = self.predict_landmarks(aligned_image,
-                                                               center,
-                                                               scale)
+            landmarks = self.predict_landmarks(aligned_image, center, scale)
+            retval.append((detected_face, landmarks))
+        return retval

     def get_center_scale(self, detected_face):
         """ Get the center and set scale of bounding box """
@@ -13,8 +13,6 @@
 import os

-from copy import copy
-
 import cv2
 import dlib
@@ -31,9 +29,6 @@ class Detector():
         self.parent_is_pool = False
         self.init = None

-        # Detected_Face Object. Passed in from initialization to avoid race condition
-        self.obj_detected_face = None
-
         # The input and output queues for the plugin.
         # See lib.multithreading.QueueManager for getting queues
         self.queues = {"in": None, "out": None}
@@ -75,7 +70,6 @@ class Detector():
         self.init = init
         self.queues["in"] = kwargs["in_queue"]
         self.queues["out"] = kwargs["out_queue"]
-        self.obj_detected_face = kwargs["detected_face"]

     def detect_faces(self, *args, **kwargs):
         """ Detect faces in rgb image
@@ -92,29 +86,8 @@ class Detector():
     def finalize(self, output):
         """ This should be called as the final task of each plugin
            Performs fianl processing and puts to the out queue """
-        detected_faces = self.to_detected_face(output["image"],
-                                               output["detected_faces"])
-        output["detected_faces"] = detected_faces
         self.queues["out"].put(output)

-    def to_detected_face(self, image, dlib_rects):
-        """ Convert list of dlib rectangles to a
-            list of DetectedFace objects
-            and add the cropped face """
-        retval = list()
-        for d_rect in dlib_rects:
-            if not isinstance(
-                    d_rect,
-                    dlib.rectangle): # pylint: disable=c-extension-no-member
-                retval.append(list())
-                continue
-            this_face = copy(self.obj_detected_face)
-            this_face.from_dlib_rect(d_rect)
-            this_face.image_to_face(image)
-            this_face.frame_dims = image.shape[:2]
-            retval.append(this_face)
-        return retval
-
     # <<< DETECTION IMAGE COMPILATION METHODS >>> #
     def compile_detection_image(self, image, is_square, scale_up):
         """ Compile the detection image """
@@ -184,22 +184,41 @@ class Extract():
     def process_faces(self, filename, faces):
         """ Perform processing on found faces """
+        size = self.args.size if hasattr(self.args, "size") else 256
+        align_eyes = self.args.align_eyes if hasattr(self.args, "align_eyes") else False
+
         final_faces = list()
         save_queue = queue_manager.get_queue("save")

         filename = faces["filename"]
         image = faces["image"]
         output_file = faces["output_file"]

         for idx, face in enumerate(faces["detected_faces"]):
+            detected_face = self.align_face(image, face, align_eyes, size)
+
             if self.export_face:
                 save_queue.put((filename,
                                 output_file,
-                                face.aligned_face,
+                                detected_face.aligned_face,
                                 idx))

-            final_faces.append(face.to_alignment())
+            final_faces.append(detected_face.to_alignment())
         self.alignments.data[os.path.basename(filename)] = final_faces

+    @staticmethod
+    def align_face(image, face, align_eyes, size, padding=48):
+        """ Align the detected face """
+        detected_face = DetectedFace()
+        detected_face.from_dlib_rect(face[0])
+        detected_face.landmarksXY = face[1]
+        detected_face.frame_dims = image.shape[:2]
+        detected_face.load_aligned(image,
+                                   size=size,
+                                   padding=padding,
+                                   align_eyes=align_eyes)
+        return detected_face


 class Plugins():
     """ Detector and Aligner Plugins and queues """
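With this change the DetectedFace object is only built here, back in the extract script, from the (dlib rectangle, landmarks) tuple that the aligner sent over the queue. A rough usage sketch of the new helper; the rectangle and landmark values are invented, and `Extract`/`DetectedFace` are the classes from this diff, assumed to be importable from the faceswap code.

```python
# Rough usage sketch for the new Extract.align_face helper shown above.
# Inputs are dummies; Extract and DetectedFace come from the faceswap code in this diff.
import dlib
import numpy as np

image = np.zeros((720, 1280, 3), dtype="uint8")                 # stand-in frame
landmarks = [(100 + (i * 7) % 180, 80 + (i * 13) % 190)          # 68 dummy (x, y) points
             for i in range(68)]
face = (dlib.rectangle(100, 80, 300, 280),                       # face[0]: detection box
        landmarks)                                               # face[1]: landmarks

detected_face = Extract.align_face(image, face, align_eyes=False, size=256)
final_faces = [detected_face.to_alignment()]                     # what the alignments file stores
```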
@@ -267,20 +286,8 @@ class Plugins():
         """ Set global arguments and load aligner plugin """
         aligner_name = self.args.aligner.replace("-", "_").lower()

-        # Align Eyes
-        align_eyes = False
-        if hasattr(self.args, 'align_eyes'):
-            align_eyes = self.args.align_eyes
-
-        # Extracted Face Size
-        size = 256
-        if hasattr(self.args, 'size'):
-            size = self.args.size
-
         aligner = PluginLoader.get_aligner(aligner_name)(
-            verbose=self.args.verbose,
-            align_eyes=align_eyes,
-            size=size)
+            verbose=self.args.verbose)

         return aligner
@@ -321,8 +328,7 @@ class Plugins():
         """ Launch the face detector """
         out_queue = queue_manager.get_queue("detect")
         kwargs = {"in_queue": queue_manager.get_queue("load"),
-                  "out_queue": out_queue,
-                  "detected_face": DetectedFace()} # Passed in to avoid race condition
+                  "out_queue": out_queue}

         if self.args.detector == "mtcnn":
             mtcnn_kwargs = self.detector.validate_kwargs(
@@ -890,7 +890,8 @@ class MouseHandler():
         landmarks = queue_manager.get_queue("out").get()
         if landmarks == "EOF":
             exit(0)
-        alignment = landmarks["detected_faces"][0].to_alignment()
+        alignment = self.extracted_to_alignment(landmarks["detected_faces"][0])
+        print(alignment)
         frame = self.media["frame_id"]

         if self.interface.get_selected_face_id() is None:
@@ -904,3 +905,15 @@ class MouseHandler():
         self.interface.state["edit"]["updated"] = True
         self.interface.state["edit"]["update_faces"] = True

+    def extracted_to_alignment(self, extract_data):
+        """ Convert Extracted Tuple to Alignments data """
+        alignment = dict()
+        d_rect, landmarks = extract_data
+        alignment["x"] = d_rect.left()
+        alignment["w"] = d_rect.right() - d_rect.left()
+        alignment["y"] = d_rect.top()
+        alignment["h"] = d_rect.bottom() - d_rect.top()
+        alignment["frame_dims"] = self.media["image"].shape[:2]
+        alignment["landmarksXY"] = landmarks
+        return alignment
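For reference, the dict this new helper builds has the same shape as a single face entry in the alignments data. A tiny example with made-up coordinates, matching the keys assigned above:

```python
# Example of the dict extracted_to_alignment() builds for one face.
# Coordinates are invented; landmarksXY is truncated for brevity.
alignment = {"x": 100,                    # d_rect.left()
             "w": 200,                    # d_rect.right() - d_rect.left()
             "y": 80,                     # d_rect.top()
             "h": 200,                    # d_rect.bottom() - d_rect.top()
             "frame_dims": (720, 1280),   # height, width of the source frame
             "landmarksXY": [(140, 150), (260, 152), (200, 230)]}
```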